diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..7a018e37d156de33120c08a3b9fc5a712b50e313 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.0005, + "base_dir": "logs_qa_adam_gated/lr_search_long", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "8d7efffa-5683-4ced-9cad-1e7715132fd9", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 
407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 
318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 
8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 
604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..35f797ba5ccc4f0370ee97d68033777db8421945 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15eb22b3b3daacb0e68e82cac2129d3cf2ba009dc1ec838d325ef0d5f9a82ed1 +size 470605 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..8295f2a549af20d80b04c8395b94bd5ce6a5a5be --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5cca200208213c737c26bd00aeca34cd76c358810a35588e1eb742aef2b2a04 +size 485819 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..aef87ba30f37d178cda49fc373d6de0a6af45000 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32a766c516951096e4a976c916c489f6c5f7a5016e378c0c9d48859fcdfeba13 +size 107996 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..3b617a2f868fe74bbe69ba504963398cee30e044 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b4c96192af8a5ca0a549dc1e1ca35fbbd5d92bfb88a0d2f568776f828c5ba69 +size 113304 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/training_log_8d7efffa-5683-4ced-9cad-1e7715132fd9.txt 
b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/training_log_8d7efffa-5683-4ced-9cad-1e7715132fd9.txt new file mode 100644 index 0000000000000000000000000000000000000000..b71f12caba35486f58fe342af62de07c92e6d83f --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/training_log_8d7efffa-5683-4ced-9cad-1e7715132fd9.txt @@ -0,0 +1,5614 @@ +[2025-09-05 17:07:50] [Rank 0] PRINT: --- Script Start: Fri Sep 5 17:07:50 2025 --- +[2025-09-05 17:07:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.0005, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 17:07:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 17:07:50] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-05 17:07:50] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42 +[2025-09-05 17:07:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed)
+ torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +# Each .bin shard starts with a 256-int32 header (magic 20240520, version 1, token count), followed by uint16 tokens. +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # cycling over the shards makes multi-epoch training wrap around automatically + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn); " + "5: All Adam (No Muon, all applicable matrices to Adam); " + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); " + "7: Muon(VO Attn, MLP)/Adam(QK Attn); " + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
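+ "9: SGD+Momentum on ALL parameters (no Muon/Adam, uses --sgd_lr); " + "10: Muon(O Attn, MLP)/Adam(QK, V Attn); " + "13: Muon(O Attn, W_2 MLP)/Adam(QK, V Attn, W_1 MLP); " + "14: Muon(O Attn)/Adam(QK, V Attn, MLP); " + "15: Muon(V Attn)/Adam(QK, O Attn, MLP); " + "16: Muon(QKV Attn)/Adam(O Attn, MLP)."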
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write each message to the log file exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code
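+# The helper defined below builds the power-law class split used for evaluation: group 0 holds a single class with 2**m samples, and each group g >= 1 holds 2**(g-1) classes with 2**(m-g) samples per class, so m=3 would yield groups of (1x8), (1x4), (2x2), (4x1). +# ...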
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK, V Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if regularization is desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
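+ # Outside mode 9, each matrix parameter is owned by exactly one optimizer: Adam always covers embeds, head, scalars, and any mode-assigned matrices, while Muon covers the remainder (and is skipped entirely when its list is empty, as in mode 5).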
+ if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr # Needed below when Muon is created; without it, gated Muon modes raise a NameError + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 here to use weight decay with Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        muon_lr = exp_args.muon_lr  # this branch previously relied on an undefined muon_lr
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
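+        # Illustrative sketch of the stable-then-decay rule implemented by get_lr
+        # further below, assuming this run's settings (num_iterations=10000,
+        # cooldown_frac=0.8): the multiplier stays at 1.0 for the first 20% of
+        # steps, then decays linearly to 0.1 by the final step.
+        def lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+            x = min(max(step / num_iterations, 0.0), 1.0)  # training progress in [0, 1]
+            if x < 1 - cooldown_frac:
+                return 1.0
+            w = (1 - x) / cooldown_frac  # 1.0 at cooldown start, 0.0 at the end
+            return w * 1.0 + (1 - w) * 0.1
+        # lr_multiplier_sketch(0) == 1.0, lr_multiplier_sketch(2000) == 1.0,
+        # lr_multiplier_sketch(6000) == 0.55, lr_multiplier_sketch(10000) == 0.1
+
+        print0(f"PRINT: Optimizers configured. 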
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 17:07:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup: the run directory name encodes optimizer_mode, parameterization, LR, and seed
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)  # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load data: use the fixed eval subset if given, otherwise fall back to the
+    #    full file (the old stratified-sampling path has been removed in favor of
+    #    the fixed per-group eval set built at startup)
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
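+    # Illustrative note on the two totals computed below (numbers hypothetical):
+    # with group A at 90/100 correct and group B at 1/10 correct,
+    #   weighted   = (90 + 1) / (100 + 10) ~= 0.83  (sample-weighted)
+    #   unweighted = (0.90 + 0.10) / 2      = 0.50  (mean of per-group accuracies)
+    # The two diverge whenever per-group sample counts are unequal.
+
+    # 4. 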
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 here to use weight decay with Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
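+        # The Muon class used above is imported from optimizers/MUON.py, which is
+        # not shown in this log. For context, public Muon implementations
+        # orthogonalize each 2D momentum update with a quintic Newton-Schulz
+        # iteration, roughly as in this self-contained sketch (coefficients follow
+        # the public modded-nanogpt Muon; treating the local copy the same way is
+        # an assumption):
+        def newton_schulz_orthogonalize(G: Tensor, steps: int = 5) -> Tensor:
+            a, b, c = (3.4445, -4.7750, 2.0315)
+            X = G.bfloat16()
+            if G.size(-2) > G.size(-1):
+                X = X.mT  # work on the wide orientation
+            X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7)  # bound the spectral norm
+            for _ in range(steps):  # matches ns_steps=5 above
+                A = X @ X.mT
+                B = b * A + c * A @ A
+                X = a * X + B @ X
+            if G.size(-2) > G.size(-1):
+                X = X.mT
+            return X
+
+        print0(f"PRINT: Optimizers configured. 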
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters; no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_2 MLP, W_O Attn
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
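Across all three parameterization branches the pattern is the same: partition the 2-D weight matrices into two disjoint lists, hand one list to Muon and the other (plus head, embeddings, and scalars) to Adam, and record each group's base LR so the schedule can rescale it later. A minimal, self-contained sketch of that split, with toy tensors standing in for the real weight matrices and torch.optim.SGD standing in for the Muon class used by this script:

import torch

# Toy stand-ins for the grouped weight matrices collected above.
attn_qk_group = [torch.nn.Parameter(torch.randn(64, 64)) for _ in range(2)]
attn_vo_group = [torch.nn.Parameter(torch.randn(64, 64)) for _ in range(2)]
all_mlp_matrices = [torch.nn.Parameter(torch.randn(256, 64)) for _ in range(2)]

mode = 5  # this run: all matrices go to Adam, Muon gets nothing
if mode == 5:
    muon_list, adam_list = [], attn_qk_group + attn_vo_group + all_mlp_matrices
elif mode == 7:  # Muon on VO + MLP, Adam on QK
    muon_list, adam_list = attn_vo_group + all_mlp_matrices, attn_qk_group

optimizers = [torch.optim.Adam(adam_list, lr=5e-4, betas=(0.8, 0.95), eps=1e-10)]
if muon_list:  # mirror the script: only create a second optimizer if it has params
    optimizers.append(torch.optim.SGD(muon_list, lr=0.01, momentum=0.95))
print(len(optimizers))  # mode 5 -> 1, matching "Total optimizers: 1" in the log below

Keeping each parameter in exactly one optimizer is what lets the training loop below simply call opt.step() on every member of optimizers.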
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
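With this run's settings (num_iterations=10000, cooldown_frac=0.8), get_lr holds the multiplier at 1.0 for the first 2000 steps and then decays it linearly to 0.1 at the final step. A small self-contained check of those values (constants copied from the config; args is replaced by plain arguments):

def get_lr_demo(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0                      # stable phase
    w = (1 - x) / cooldown_frac         # 1.0 at the start of cooldown, 0.0 at the end
    return w * 1.0 + (1 - w) * 0.1      # linear decay down to 10% of the base LR

assert get_lr_demo(0) == get_lr_demo(1999) == 1.0
assert abs(get_lr_demo(6000) - 0.55) < 1e-9    # halfway through the cooldown
assert abs(get_lr_demo(10000) - 0.1) < 1e-9    # floor at the last step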
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
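build_fixed_eval_indices buckets JSONL line numbers by group id and draws an equal-sized, seeded sample from each bucket, so every later evaluation reuses exactly the same examples. A toy illustration of that behaviour (hypothetical file and class ids; the real run samples per_group_k=100 per group from qa_tail_m15.jsonl, and this sketch assumes build_fixed_eval_indices from the listing above is in scope):

import json, tempfile

rows = [{"class_id": cid, "text": f"Q{cid}? Answer: A{cid}"} for cid in range(4) for _ in range(5)]
with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as f:
    f.write("\n".join(json.dumps(r) for r in rows))
    toy_path = f.name

toy_map = {0: 0, 1: 0, 2: 1, 3: 1}  # four classes folded into two groups
fixed = build_fixed_eval_indices(toy_path, toy_map, per_group_k=3)
print({gid: len(ix) for gid, ix in fixed.items()})  # -> {'0': 3, '1': 3}

Because the sampler is seeded and the result is persisted to fixed_eval_indices.json, the 16 groups x 100 samples (the "1600 samples" reported in the log below) stay identical across evaluation steps and reruns.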
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
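Two details of the update step are easy to miss in the listing above. Gradients are averaged across ranks by hand with dist.all_reduce rather than via DDP, and Muon's momentum is warmed up from 0.85 to 0.95 over the first 300 steps (a no-op in this mode-5 run, where optimizer2 is None). A quick self-contained check of the warmup arithmetic:

def muon_momentum(step: int) -> float:
    frac = min(step / 300, 1)
    return (1 - frac) * 0.85 + frac * 0.95

assert muon_momentum(0) == 0.85                           # start of training
assert abs(muon_momentum(150) - 0.90) < 1e-12             # midpoint of the ramp
assert muon_momentum(300) == muon_momentum(9999) == 0.95  # held constant afterwards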
+[2025-09-05 17:07:50] [Rank 0] PRINT: Constructing model...
+[2025-09-05 17:07:51] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 17:07:51] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 17:07:51] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 17:07:56] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 17:07:56] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 17:07:56] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 17:07:56] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 17:07:56] [Rank 0] PRINT: Model returns:
+[2025-09-05 17:07:56] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 17:07:56] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 17:07:56] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005).
+[2025-09-05 17:07:56] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 17:07:56] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 17:07:56] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 17:08:01] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 17:08:01] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 17:08:41] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 17:08:41] [Rank 0] PRINT: Starting training...
+[2025-09-05 17:08:47] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/fixed_eval_indices.json
+[2025-09-05 17:08:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
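The warning that repeats before every validation pass is plain integer arithmetic: 491520 validation tokens do not divide evenly into per-pass batches of 65536 tokens (val_seq_len 16384 across what the batch size implies are 4 ranks), so each evaluation runs 7 full steps and the remaining tokens are never scored:

val_tokens = 491520
val_batch_size = 65536                 # world_size * val_seq_len = 4 * 16384
print(val_tokens // val_batch_size)    # 7 validation steps per eval
print(val_tokens % val_batch_size)     # 32768 tokens skipped each time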
+[2025-09-05 17:08:51] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 17:09:25] [Rank 0] step:21/10000 train_time:33187ms step_avg:1580.34ms
+[2025-09-05 17:09:25] [Rank 0] step:41/10000 train_time:33835ms step_avg:825.25ms
+[2025-09-05 17:09:26] [Rank 0] step:61/10000 train_time:34482ms step_avg:565.28ms
+[2025-09-05 17:09:27] [Rank 0] step:81/10000 train_time:35130ms step_avg:433.70ms
+[2025-09-05 17:09:27] [Rank 0] step:101/10000 train_time:35777ms step_avg:354.23ms
+[2025-09-05 17:09:28] [Rank 0] step:121/10000 train_time:36425ms step_avg:301.03ms
+[2025-09-05 17:09:29] [Rank 0] step:141/10000 train_time:37071ms step_avg:262.92ms
+[2025-09-05 17:09:29] [Rank 0] step:161/10000 train_time:37719ms step_avg:234.28ms
+[2025-09-05 17:09:30] [Rank 0] step:181/10000 train_time:38365ms step_avg:211.96ms
+[2025-09-05 17:09:31] [Rank 0] step:201/10000 train_time:39013ms step_avg:194.09ms
+[2025-09-05 17:09:31] [Rank 0] step:221/10000 train_time:39659ms step_avg:179.45ms
+[2025-09-05 17:09:32] [Rank 0] step:241/10000 train_time:40307ms step_avg:167.25ms
+[2025-09-05 17:09:33] [Rank 0] step:261/10000 train_time:40954ms step_avg:156.91ms
+[2025-09-05 17:09:33] [Rank 0] step:281/10000 train_time:41602ms step_avg:148.05ms
+[2025-09-05 17:09:34] [Rank 0] step:301/10000 train_time:42249ms step_avg:140.36ms
+[2025-09-05 17:09:34] [Rank 0] step:321/10000 train_time:42895ms step_avg:133.63ms
+[2025-09-05 17:09:35] [Rank 0] step:341/10000 train_time:43543ms step_avg:127.69ms
+[2025-09-05 17:09:36] [Rank 0] step:361/10000 train_time:44191ms step_avg:122.41ms
+[2025-09-05 17:09:36] [Rank 0] step:381/10000 train_time:44836ms step_avg:117.68ms
+[2025-09-05 17:09:37] [Rank 0] step:401/10000 train_time:45483ms step_avg:113.42ms
+[2025-09-05 17:09:38] [Rank 0] step:421/10000 train_time:46130ms step_avg:109.57ms
+[2025-09-05 17:09:38] [Rank 0] step:441/10000 train_time:46777ms step_avg:106.07ms
+[2025-09-05 17:09:39] [Rank 0] step:461/10000 train_time:47527ms step_avg:103.09ms
+[2025-09-05 17:09:40] [Rank 0] step:481/10000 train_time:48174ms step_avg:100.15ms
+[2025-09-05 17:09:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:09:41] [Rank 0] PRINT: step:500/10000 train_loss:6.8927 val_loss:4.6786 train_time:49051ms step_avg:98.10ms
+[2025-09-05 17:09:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:09:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:11:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:11:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:11:03] [Rank 0]   Total Loss: 5.7660
+[2025-09-05 17:11:03] [Rank 0]   Total FTA (Unweighted): 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Total FTA (Weighted): 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 0 Loss: 4.7897
+[2025-09-05 17:11:03] [Rank 0]   Group 1 Loss: 4.8939
+[2025-09-05 17:11:03] [Rank 0]   Group 2 Loss: 5.0497
+[2025-09-05 17:11:03] [Rank 0]   Group 3 Loss: 5.3185
+[2025-09-05 17:11:03] [Rank 0]   Group 4 Loss: 5.6211
+[2025-09-05 17:11:03] [Rank 0]   Group 5 Loss: 5.7647
+[2025-09-05 17:11:03] [Rank 0]   Group 6 Loss: 5.8912
+[2025-09-05 17:11:03] [Rank 0]   Group 7 Loss: 5.9174
+[2025-09-05 17:11:03] [Rank 0]   Group 8 Loss: 6.0541
+[2025-09-05 17:11:03] [Rank 0]   Group 9 Loss: 6.1452
+[2025-09-05 17:11:03] [Rank 0]   Group 10 Loss: 6.1409
+[2025-09-05 17:11:03] [Rank 0]   Group 11 Loss: 6.1983
+[2025-09-05 17:11:03] [Rank 0]   Group 12 Loss: 6.1074
+[2025-09-05 17:11:03] [Rank 0]   Group 13 Loss: 6.1145
+[2025-09-05 17:11:03] [Rank 0]   Group 14 Loss: 6.1586
+[2025-09-05 17:11:03] [Rank 0]   Group 15 Loss: 6.0909
+[2025-09-05 17:11:03] [Rank 0]   Group 0 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 1 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 2 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 3 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 4 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 5 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 6 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 7 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 8 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 9 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 10 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 11 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 12 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 13 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 14 FTA: 0.0000
+[2025-09-05 17:11:03] [Rank 0]   Group 15 FTA: 0.0000
+[2025-09-05 17:11:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:11:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:11:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:11:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:11:05] [Rank 0] step:501/10000 train_time:49060ms step_avg:97.92ms
+[2025-09-05 17:11:05] [Rank 0] step:521/10000 train_time:49500ms step_avg:95.01ms
+[2025-09-05 17:11:06] [Rank 0] step:541/10000 train_time:50148ms step_avg:92.69ms
+[2025-09-05 17:11:07] [Rank 0] step:561/10000 train_time:50793ms step_avg:90.54ms
+[2025-09-05 17:11:07] [Rank 0] step:581/10000 train_time:51439ms step_avg:88.54ms
+[2025-09-05 17:11:08] [Rank 0] step:601/10000 train_time:52086ms step_avg:86.66ms
+[2025-09-05 17:11:09] [Rank 0] step:621/10000 train_time:52732ms step_avg:84.92ms
+[2025-09-05 17:11:09] [Rank 0] step:641/10000 train_time:53379ms step_avg:83.27ms
+[2025-09-05 17:11:10] [Rank 0] step:661/10000 train_time:54025ms step_avg:81.73ms
+[2025-09-05 17:11:11] [Rank 0] step:681/10000 train_time:54671ms step_avg:80.28ms
+[2025-09-05 17:11:11] [Rank 0] step:701/10000 train_time:55317ms step_avg:78.91ms
+[2025-09-05 17:11:12] [Rank 0] step:721/10000 train_time:55963ms step_avg:77.62ms
+[2025-09-05 17:11:12] [Rank 0] step:741/10000 train_time:56609ms step_avg:76.40ms
+[2025-09-05 17:11:13] [Rank 0] step:761/10000 train_time:57258ms step_avg:75.24ms
+[2025-09-05 17:11:14] [Rank 0] step:781/10000 train_time:57910ms step_avg:74.15ms
+[2025-09-05 17:11:14] [Rank 0] step:801/10000 train_time:58561ms step_avg:73.11ms
+[2025-09-05 17:11:16] [Rank 0] step:821/10000 train_time:59321ms step_avg:72.25ms
+[2025-09-05 17:11:16] [Rank 0] step:841/10000 train_time:60338ms step_avg:71.75ms
+[2025-09-05 17:11:17] [Rank 0] step:861/10000 train_time:60992ms step_avg:70.84ms
+[2025-09-05 17:11:18] [Rank 0] step:881/10000 train_time:61641ms step_avg:69.97ms
+[2025-09-05 17:11:18] [Rank 0] step:901/10000 train_time:62292ms step_avg:69.14ms
+[2025-09-05 17:11:19] [Rank 0] step:921/10000 train_time:62943ms step_avg:68.34ms
+[2025-09-05 17:11:19] [Rank 0] step:941/10000 train_time:63595ms step_avg:67.58ms
+[2025-09-05 17:11:20] [Rank 0] step:961/10000 train_time:64247ms step_avg:66.85ms
+[2025-09-05 17:11:21] [Rank 0] step:981/10000 train_time:64898ms step_avg:66.16ms
+[2025-09-05 17:11:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:11:22] [Rank 0] PRINT: step:1000/10000 train_loss:3.2304 val_loss:2.1588 train_time:65989ms step_avg:65.99ms
+[2025-09-05 17:11:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:11:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:12:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:12:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:12:43] [Rank 0]   Total Loss: 4.1004
+[2025-09-05 17:12:43] [Rank 0]   Total FTA (Unweighted): 0.1306
+[2025-09-05 17:12:43] [Rank 0]   Total FTA (Weighted): 0.1306
+[2025-09-05 17:12:43] [Rank 0]   Group 0 Loss: 3.0248
+[2025-09-05 17:12:43] [Rank 0]   Group 1 Loss: 3.0235
+[2025-09-05 17:12:43] [Rank 0]   Group 2 Loss: 3.0529
+[2025-09-05 17:12:44] [Rank 0]   Group 3 Loss: 3.4652
+[2025-09-05 17:12:44] [Rank 0]   Group 4 Loss: 3.6582
+[2025-09-05 17:12:44] [Rank 0]   Group 5 Loss: 3.9762
+[2025-09-05 17:12:44] [Rank 0]   Group 6 Loss: 4.1757
+[2025-09-05 17:12:44] [Rank 0]   Group 7 Loss: 4.2619
+[2025-09-05 17:12:44] [Rank 0]   Group 8 Loss: 4.4524
+[2025-09-05 17:12:44] [Rank 0]   Group 9 Loss: 4.5533
+[2025-09-05 17:12:44] [Rank 0]   Group 10 Loss: 4.6304
+[2025-09-05 17:12:44] [Rank 0]   Group 11 Loss: 4.6960
+[2025-09-05 17:12:44] [Rank 0]   Group 12 Loss: 4.6403
+[2025-09-05 17:12:44] [Rank 0]   Group 13 Loss: 4.6622
+[2025-09-05 17:12:44] [Rank 0]   Group 14 Loss: 4.6994
+[2025-09-05 17:12:44] [Rank 0]   Group 15 Loss: 4.6336
+[2025-09-05 17:12:44] [Rank 0]   Group 0 FTA: 0.4000
+[2025-09-05 17:12:44] [Rank 0]   Group 1 FTA: 0.2000
+[2025-09-05 17:12:44] [Rank 0]   Group 2 FTA: 0.1800
+[2025-09-05 17:12:44] [Rank 0]   Group 3 FTA: 0.1100
+[2025-09-05 17:12:44] [Rank 0]   Group 4 FTA: 0.1300
+[2025-09-05 17:12:44] [Rank 0]   Group 5 FTA: 0.2000
+[2025-09-05 17:12:44] [Rank 0]   Group 6 FTA: 0.0900
+[2025-09-05 17:12:44] [Rank 0]   Group 7 FTA: 0.0800
+[2025-09-05 17:12:44] [Rank 0]   Group 8 FTA: 0.1400
+[2025-09-05 17:12:44] [Rank 0]   Group 9 FTA: 0.0700
+[2025-09-05 17:12:44] [Rank 0]   Group 10 FTA: 0.0800
+[2025-09-05 17:12:44] [Rank 0]   Group 11 FTA: 0.0700
+[2025-09-05 17:12:44] [Rank 0]   Group 12 FTA: 0.0800
+[2025-09-05 17:12:44] [Rank 0]   Group 13 FTA: 0.1000
+[2025-09-05 17:12:44] [Rank 0]   Group 14 FTA: 0.1000
+[2025-09-05 17:12:44] [Rank 0]   Group 15 FTA: 0.0600
+[2025-09-05 17:12:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:12:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:12:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:12:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:12:46] [Rank 0] step:1001/10000 train_time:65998ms step_avg:65.93ms
+[2025-09-05 17:12:46] [Rank 0] step:1021/10000 train_time:66440ms step_avg:65.07ms
+[2025-09-05 17:12:47] [Rank 0] step:1041/10000 train_time:67094ms step_avg:64.45ms
+[2025-09-05 17:12:48] [Rank 0] step:1061/10000 train_time:67745ms step_avg:63.85ms
+[2025-09-05 17:12:48] [Rank 0] step:1081/10000 train_time:68397ms step_avg:63.27ms
+[2025-09-05 17:12:49] [Rank 0] step:1101/10000 train_time:69050ms step_avg:62.72ms
+[2025-09-05 17:12:50] [Rank 0] step:1121/10000 train_time:69707ms step_avg:62.18ms
+[2025-09-05 17:12:50] [Rank 0] step:1141/10000 train_time:70359ms step_avg:61.66ms
+[2025-09-05 17:12:51] [Rank 0] step:1161/10000 train_time:71011ms step_avg:61.16ms
+[2025-09-05 17:12:52] [Rank 0] step:1181/10000 train_time:71663ms step_avg:60.68ms
+[2025-09-05 17:12:52] [Rank 0] step:1201/10000 train_time:72317ms step_avg:60.21ms
+[2025-09-05 17:12:53] [Rank 0] step:1221/10000 train_time:72969ms step_avg:59.76ms
+[2025-09-05 17:12:54] [Rank 0] step:1241/10000 train_time:73620ms step_avg:59.32ms
+[2025-09-05 17:12:54] [Rank 0] step:1261/10000 train_time:74273ms step_avg:58.90ms
+[2025-09-05 17:12:55] [Rank 0] step:1281/10000 train_time:74925ms step_avg:58.49ms
+[2025-09-05 17:12:55] [Rank 0] step:1301/10000 train_time:75578ms step_avg:58.09ms
+[2025-09-05 17:12:56] [Rank 0] step:1321/10000 train_time:76235ms step_avg:57.71ms
+[2025-09-05 17:12:57] [Rank 0] step:1341/10000 train_time:76887ms step_avg:57.34ms
+[2025-09-05 17:12:57] [Rank 0] step:1361/10000 train_time:77540ms step_avg:56.97ms
+[2025-09-05 17:12:58] [Rank 0] step:1381/10000 train_time:78191ms step_avg:56.62ms
+[2025-09-05 17:12:59] [Rank 0] step:1401/10000 train_time:78844ms step_avg:56.28ms
+[2025-09-05 17:12:59] [Rank 0] step:1421/10000 train_time:79497ms step_avg:55.94ms
+[2025-09-05 17:13:00] [Rank 0] step:1441/10000 train_time:80149ms step_avg:55.62ms
+[2025-09-05 17:13:01] [Rank 0] step:1461/10000 train_time:80802ms step_avg:55.31ms
+[2025-09-05 17:13:01] [Rank 0] step:1481/10000 train_time:81454ms step_avg:55.00ms
+[2025-09-05 17:13:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:13:02] [Rank 0] PRINT: step:1500/10000 train_loss:1.7335 val_loss:1.4325 train_time:82339ms step_avg:54.89ms
+[2025-09-05 17:13:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:13:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:14:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:14:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:14:24] [Rank 0]   Total Loss: 3.9876
+[2025-09-05 17:14:24] [Rank 0]   Total FTA (Unweighted): 0.3494
+[2025-09-05 17:14:24] [Rank 0]   Total FTA (Weighted): 0.3494
+[2025-09-05 17:14:24] [Rank 0]   Group 0 Loss: 3.0821
+[2025-09-05 17:14:24] [Rank 0]   Group 1 Loss: 3.1612
+[2025-09-05 17:14:24] [Rank 0]   Group 2 Loss: 3.1124
+[2025-09-05 17:14:24] [Rank 0]   Group 3 Loss: 3.3948
+[2025-09-05 17:14:24] [Rank 0]   Group 4 Loss: 3.5218
+[2025-09-05 17:14:24] [Rank 0]   Group 5 Loss: 3.6811
+[2025-09-05 17:14:24] [Rank 0]   Group 6 Loss: 3.8542
+[2025-09-05 17:14:24] [Rank 0]   Group 7 Loss: 4.0360
+[2025-09-05 17:14:24] [Rank 0]   Group 8 Loss: 4.2906
+[2025-09-05 17:14:24] [Rank 0]   Group 9 Loss: 4.3367
+[2025-09-05 17:14:24] [Rank 0]   Group 10 Loss: 4.4785
+[2025-09-05 17:14:24] [Rank 0]   Group 11 Loss: 4.5109
+[2025-09-05 17:14:24] [Rank 0]   Group 12 Loss: 4.5137
+[2025-09-05 17:14:24] [Rank 0]   Group 13 Loss: 4.6150
+[2025-09-05 17:14:24] [Rank 0]   Group 14 Loss: 4.6185
+[2025-09-05 17:14:24] [Rank 0]   Group 15 Loss: 4.5945
+[2025-09-05 17:14:24] [Rank 0]   Group 0 FTA: 1.0000
+[2025-09-05 17:14:24] [Rank 0]   Group 1 FTA: 1.0000
+[2025-09-05 17:14:24] [Rank 0]   Group 2 FTA: 0.9000
+[2025-09-05 17:14:24] [Rank 0]   Group 3 FTA: 0.7000
+[2025-09-05 17:14:24] [Rank 0]   Group 4 FTA: 0.3300
+[2025-09-05 17:14:24] [Rank 0]   Group 5 FTA: 0.2900
+[2025-09-05 17:14:24] [Rank 0]   Group 6 FTA: 0.2000
+[2025-09-05 17:14:24] [Rank 0]   Group 7 FTA: 0.2100
+[2025-09-05 17:14:24] [Rank 0]   Group 8 FTA: 0.2300
+[2025-09-05 17:14:24] [Rank 0]   Group 9 FTA: 0.1500
+[2025-09-05 17:14:24] [Rank 0]   Group 10 FTA: 0.1300
+[2025-09-05 17:14:24] [Rank 0]   Group 11 FTA: 0.0800
+[2025-09-05 17:14:24] [Rank 0]   Group 12 FTA: 0.0800
+[2025-09-05 17:14:24] [Rank 0]   Group 13 FTA: 0.1100
+[2025-09-05 17:14:24] [Rank 0]   Group 14 FTA: 0.1100
+[2025-09-05 17:14:24] [Rank 0]   Group 15 FTA: 0.0700
+[2025-09-05 17:14:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:14:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:14:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:14:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:14:26] [Rank 0] step:1501/10000 train_time:82349ms step_avg:54.86ms
+[2025-09-05 17:14:26] [Rank 0] step:1521/10000 train_time:82799ms step_avg:54.44ms
+[2025-09-05 17:14:27] [Rank 0] step:1541/10000 train_time:83450ms step_avg:54.15ms
+[2025-09-05 
17:14:28] [Rank 0] step:1561/10000 train_time:84101ms step_avg:53.88ms +[2025-09-05 17:14:28] [Rank 0] step:1561/10000 train_time:84101ms step_avg:53.88ms +[2025-09-05 17:14:28] [Rank 0] step:1581/10000 train_time:84753ms step_avg:53.61ms +[2025-09-05 17:14:28] [Rank 0] step:1581/10000 train_time:84753ms step_avg:53.61ms +[2025-09-05 17:14:29] [Rank 0] step:1601/10000 train_time:85406ms step_avg:53.35ms +[2025-09-05 17:14:29] [Rank 0] step:1601/10000 train_time:85406ms step_avg:53.35ms +[2025-09-05 17:14:30] [Rank 0] step:1621/10000 train_time:86055ms step_avg:53.09ms +[2025-09-05 17:14:30] [Rank 0] step:1621/10000 train_time:86055ms step_avg:53.09ms +[2025-09-05 17:14:30] [Rank 0] step:1641/10000 train_time:86706ms step_avg:52.84ms +[2025-09-05 17:14:30] [Rank 0] step:1641/10000 train_time:86706ms step_avg:52.84ms +[2025-09-05 17:14:31] [Rank 0] step:1661/10000 train_time:87357ms step_avg:52.59ms +[2025-09-05 17:14:31] [Rank 0] step:1661/10000 train_time:87357ms step_avg:52.59ms +[2025-09-05 17:14:32] [Rank 0] step:1681/10000 train_time:88208ms step_avg:52.47ms +[2025-09-05 17:14:32] [Rank 0] step:1681/10000 train_time:88208ms step_avg:52.47ms +[2025-09-05 17:14:32] [Rank 0] step:1701/10000 train_time:88857ms step_avg:52.24ms +[2025-09-05 17:14:32] [Rank 0] step:1701/10000 train_time:88857ms step_avg:52.24ms +[2025-09-05 17:14:33] [Rank 0] step:1721/10000 train_time:89508ms step_avg:52.01ms +[2025-09-05 17:14:33] [Rank 0] step:1721/10000 train_time:89508ms step_avg:52.01ms +[2025-09-05 17:14:34] [Rank 0] step:1741/10000 train_time:90380ms step_avg:51.91ms +[2025-09-05 17:14:34] [Rank 0] step:1741/10000 train_time:90380ms step_avg:51.91ms +[2025-09-05 17:14:35] [Rank 0] step:1761/10000 train_time:91031ms step_avg:51.69ms +[2025-09-05 17:14:35] [Rank 0] step:1761/10000 train_time:91031ms step_avg:51.69ms +[2025-09-05 17:14:35] [Rank 0] step:1781/10000 train_time:91683ms step_avg:51.48ms +[2025-09-05 17:14:35] [Rank 0] step:1781/10000 train_time:91683ms step_avg:51.48ms +[2025-09-05 17:14:36] [Rank 0] step:1801/10000 train_time:92334ms step_avg:51.27ms +[2025-09-05 17:14:36] [Rank 0] step:1801/10000 train_time:92334ms step_avg:51.27ms +[2025-09-05 17:14:37] [Rank 0] step:1821/10000 train_time:92985ms step_avg:51.06ms +[2025-09-05 17:14:37] [Rank 0] step:1821/10000 train_time:92985ms step_avg:51.06ms +[2025-09-05 17:14:37] [Rank 0] step:1841/10000 train_time:93636ms step_avg:50.86ms +[2025-09-05 17:14:37] [Rank 0] step:1841/10000 train_time:93636ms step_avg:50.86ms +[2025-09-05 17:14:38] [Rank 0] step:1861/10000 train_time:94287ms step_avg:50.66ms +[2025-09-05 17:14:38] [Rank 0] step:1861/10000 train_time:94287ms step_avg:50.66ms +[2025-09-05 17:14:38] [Rank 0] step:1881/10000 train_time:94939ms step_avg:50.47ms +[2025-09-05 17:14:38] [Rank 0] step:1881/10000 train_time:94939ms step_avg:50.47ms +[2025-09-05 17:14:39] [Rank 0] step:1901/10000 train_time:95592ms step_avg:50.29ms +[2025-09-05 17:14:39] [Rank 0] step:1901/10000 train_time:95592ms step_avg:50.29ms +[2025-09-05 17:14:40] [Rank 0] step:1921/10000 train_time:96243ms step_avg:50.10ms +[2025-09-05 17:14:40] [Rank 0] step:1921/10000 train_time:96243ms step_avg:50.10ms +[2025-09-05 17:14:40] [Rank 0] step:1941/10000 train_time:96895ms step_avg:49.92ms +[2025-09-05 17:14:40] [Rank 0] step:1941/10000 train_time:96895ms step_avg:49.92ms +[2025-09-05 17:14:41] [Rank 0] step:1961/10000 train_time:97546ms step_avg:49.74ms +[2025-09-05 17:14:41] [Rank 0] step:1961/10000 train_time:97546ms step_avg:49.74ms +[2025-09-05 17:14:42] [Rank 0] 
step:1981/10000 train_time:98198ms step_avg:49.57ms +[2025-09-05 17:14:42] [Rank 0] step:1981/10000 train_time:98198ms step_avg:49.57ms +[2025-09-05 17:14:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:14:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:14:43] [Rank 0] PRINT: step:2000/10000 train_loss:1.2700 val_loss:1.1390 train_time:99081ms step_avg:49.54ms +[2025-09-05 17:14:43] [Rank 0] PRINT: step:2000/10000 train_loss:1.2700 val_loss:1.1390 train_time:99081ms step_avg:49.54ms +[2025-09-05 17:14:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:14:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:14:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:14:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:16:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:16:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:16:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:16:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:16:05] [Rank 0] Total Loss: 4.0676 +[2025-09-05 17:16:05] [Rank 0] Total Loss: 4.0676 +[2025-09-05 17:16:05] [Rank 0] Total FTA (Unweighted): 0.4712 +[2025-09-05 17:16:05] [Rank 0] Total FTA (Unweighted): 0.4712 +[2025-09-05 17:16:05] [Rank 0] Total FTA (Weighted): 0.4713 +[2025-09-05 17:16:05] [Rank 0] Total FTA (Weighted): 0.4713 +[2025-09-05 17:16:05] [Rank 0] Group 0 Loss: 3.5470 +[2025-09-05 17:16:05] [Rank 0] Group 0 Loss: 3.5470 +[2025-09-05 17:16:05] [Rank 0] Group 1 Loss: 3.3128 +[2025-09-05 17:16:05] [Rank 0] Group 1 Loss: 3.3128 +[2025-09-05 17:16:05] [Rank 0] Group 2 Loss: 3.2083 +[2025-09-05 17:16:05] [Rank 0] Group 2 Loss: 3.2083 +[2025-09-05 17:16:05] [Rank 0] Group 3 Loss: 3.5886 +[2025-09-05 17:16:05] [Rank 0] Group 3 Loss: 3.5886 +[2025-09-05 17:16:05] [Rank 0] Group 4 Loss: 3.6256 +[2025-09-05 17:16:05] [Rank 0] Group 4 Loss: 3.6256 +[2025-09-05 17:16:05] [Rank 0] Group 5 Loss: 3.7595 +[2025-09-05 17:16:05] [Rank 0] Group 5 Loss: 3.7595 +[2025-09-05 17:16:05] [Rank 0] Group 6 Loss: 3.8528 +[2025-09-05 17:16:05] [Rank 0] Group 6 Loss: 3.8528 +[2025-09-05 17:16:05] [Rank 0] Group 7 Loss: 3.9849 +[2025-09-05 17:16:05] [Rank 0] Group 7 Loss: 3.9849 +[2025-09-05 17:16:05] [Rank 0] Group 8 Loss: 4.2247 +[2025-09-05 17:16:05] [Rank 0] Group 8 Loss: 4.2247 +[2025-09-05 17:16:05] [Rank 0] Group 9 Loss: 4.3157 +[2025-09-05 17:16:05] [Rank 0] Group 9 Loss: 4.3157 +[2025-09-05 17:16:05] [Rank 0] Group 10 Loss: 4.4779 +[2025-09-05 17:16:05] [Rank 0] Group 10 Loss: 4.4779 +[2025-09-05 17:16:05] [Rank 0] Group 11 Loss: 4.5436 +[2025-09-05 17:16:05] [Rank 0] Group 11 Loss: 4.5436 +[2025-09-05 17:16:05] [Rank 0] Group 12 Loss: 4.5371 +[2025-09-05 17:16:05] [Rank 0] Group 12 Loss: 4.5371 +[2025-09-05 17:16:05] [Rank 0] Group 13 Loss: 4.7262 +[2025-09-05 17:16:05] [Rank 0] Group 13 Loss: 4.7262 +[2025-09-05 17:16:05] [Rank 0] Group 14 Loss: 4.6779 +[2025-09-05 17:16:05] [Rank 0] Group 14 Loss: 4.6779 +[2025-09-05 17:16:05] [Rank 0] Group 15 Loss: 4.6992 +[2025-09-05 17:16:05] [Rank 0] Group 15 Loss: 4.6992 +[2025-09-05 17:16:05] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:16:05] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:16:06] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:16:06] [Rank 0] 
Group 1 FTA: 1.0000 +[2025-09-05 17:16:06] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:16:06] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:16:06] [Rank 0] Group 3 FTA: 0.9700 +[2025-09-05 17:16:06] [Rank 0] Group 3 FTA: 0.9700 +[2025-09-05 17:16:06] [Rank 0] Group 4 FTA: 0.7500 +[2025-09-05 17:16:06] [Rank 0] Group 4 FTA: 0.7500 +[2025-09-05 17:16:06] [Rank 0] Group 5 FTA: 0.5700 +[2025-09-05 17:16:06] [Rank 0] Group 5 FTA: 0.5700 +[2025-09-05 17:16:06] [Rank 0] Group 6 FTA: 0.4300 +[2025-09-05 17:16:06] [Rank 0] Group 6 FTA: 0.4300 +[2025-09-05 17:16:06] [Rank 0] Group 7 FTA: 0.4300 +[2025-09-05 17:16:06] [Rank 0] Group 7 FTA: 0.4300 +[2025-09-05 17:16:06] [Rank 0] Group 8 FTA: 0.3900 +[2025-09-05 17:16:06] [Rank 0] Group 8 FTA: 0.3900 +[2025-09-05 17:16:06] [Rank 0] Group 9 FTA: 0.2600 +[2025-09-05 17:16:06] [Rank 0] Group 9 FTA: 0.2600 +[2025-09-05 17:16:06] [Rank 0] Group 10 FTA: 0.1800 +[2025-09-05 17:16:06] [Rank 0] Group 10 FTA: 0.1800 +[2025-09-05 17:16:06] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-05 17:16:06] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-05 17:16:06] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 17:16:06] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 17:16:06] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 17:16:06] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 17:16:06] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 17:16:06] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 17:16:06] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 17:16:06] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 17:16:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:16:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:16:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:16:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:16:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:16:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:16:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:16:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:16:07] [Rank 0] step:2001/10000 train_time:99091ms step_avg:49.52ms +[2025-09-05 17:16:07] [Rank 0] step:2001/10000 train_time:99091ms step_avg:49.52ms +[2025-09-05 17:16:08] [Rank 0] step:2021/10000 train_time:99737ms step_avg:49.35ms +[2025-09-05 17:16:08] [Rank 0] step:2021/10000 train_time:99737ms step_avg:49.35ms +[2025-09-05 17:16:09] [Rank 0] step:2041/10000 train_time:100389ms step_avg:49.19ms +[2025-09-05 17:16:09] [Rank 0] step:2041/10000 train_time:100389ms step_avg:49.19ms +[2025-09-05 17:16:09] [Rank 0] step:2061/10000 train_time:101042ms step_avg:49.03ms +[2025-09-05 17:16:09] [Rank 0] step:2061/10000 train_time:101042ms 
step_avg:49.03ms +[2025-09-05 17:16:10] [Rank 0] step:2081/10000 train_time:101693ms step_avg:48.87ms +[2025-09-05 17:16:10] [Rank 0] step:2081/10000 train_time:101693ms step_avg:48.87ms +[2025-09-05 17:16:11] [Rank 0] step:2101/10000 train_time:102344ms step_avg:48.71ms +[2025-09-05 17:16:11] [Rank 0] step:2101/10000 train_time:102344ms step_avg:48.71ms +[2025-09-05 17:16:11] [Rank 0] step:2121/10000 train_time:102997ms step_avg:48.56ms +[2025-09-05 17:16:11] [Rank 0] step:2121/10000 train_time:102997ms step_avg:48.56ms +[2025-09-05 17:16:12] [Rank 0] step:2141/10000 train_time:103649ms step_avg:48.41ms +[2025-09-05 17:16:12] [Rank 0] step:2141/10000 train_time:103649ms step_avg:48.41ms +[2025-09-05 17:16:12] [Rank 0] step:2161/10000 train_time:104300ms step_avg:48.26ms +[2025-09-05 17:16:12] [Rank 0] step:2161/10000 train_time:104300ms step_avg:48.26ms +[2025-09-05 17:16:13] [Rank 0] step:2181/10000 train_time:104952ms step_avg:48.12ms +[2025-09-05 17:16:13] [Rank 0] step:2181/10000 train_time:104952ms step_avg:48.12ms +[2025-09-05 17:16:14] [Rank 0] step:2201/10000 train_time:105603ms step_avg:47.98ms +[2025-09-05 17:16:14] [Rank 0] step:2201/10000 train_time:105603ms step_avg:47.98ms +[2025-09-05 17:16:14] [Rank 0] step:2221/10000 train_time:106254ms step_avg:47.84ms +[2025-09-05 17:16:14] [Rank 0] step:2221/10000 train_time:106254ms step_avg:47.84ms +[2025-09-05 17:16:15] [Rank 0] step:2241/10000 train_time:106911ms step_avg:47.71ms +[2025-09-05 17:16:15] [Rank 0] step:2241/10000 train_time:106911ms step_avg:47.71ms +[2025-09-05 17:16:16] [Rank 0] step:2261/10000 train_time:107567ms step_avg:47.58ms +[2025-09-05 17:16:16] [Rank 0] step:2261/10000 train_time:107567ms step_avg:47.58ms +[2025-09-05 17:16:16] [Rank 0] step:2281/10000 train_time:108225ms step_avg:47.45ms +[2025-09-05 17:16:16] [Rank 0] step:2281/10000 train_time:108225ms step_avg:47.45ms +[2025-09-05 17:16:17] [Rank 0] step:2301/10000 train_time:108883ms step_avg:47.32ms +[2025-09-05 17:16:17] [Rank 0] step:2301/10000 train_time:108883ms step_avg:47.32ms +[2025-09-05 17:16:18] [Rank 0] step:2321/10000 train_time:109540ms step_avg:47.20ms +[2025-09-05 17:16:18] [Rank 0] step:2321/10000 train_time:109540ms step_avg:47.20ms +[2025-09-05 17:16:18] [Rank 0] step:2341/10000 train_time:110199ms step_avg:47.07ms +[2025-09-05 17:16:18] [Rank 0] step:2341/10000 train_time:110199ms step_avg:47.07ms +[2025-09-05 17:16:19] [Rank 0] step:2361/10000 train_time:110857ms step_avg:46.95ms +[2025-09-05 17:16:19] [Rank 0] step:2361/10000 train_time:110857ms step_avg:46.95ms +[2025-09-05 17:16:20] [Rank 0] step:2381/10000 train_time:111515ms step_avg:46.84ms +[2025-09-05 17:16:20] [Rank 0] step:2381/10000 train_time:111515ms step_avg:46.84ms +[2025-09-05 17:16:20] [Rank 0] step:2401/10000 train_time:112176ms step_avg:46.72ms +[2025-09-05 17:16:20] [Rank 0] step:2401/10000 train_time:112176ms step_avg:46.72ms +[2025-09-05 17:16:21] [Rank 0] step:2421/10000 train_time:112833ms step_avg:46.61ms +[2025-09-05 17:16:21] [Rank 0] step:2421/10000 train_time:112833ms step_avg:46.61ms +[2025-09-05 17:16:22] [Rank 0] step:2441/10000 train_time:113492ms step_avg:46.49ms +[2025-09-05 17:16:22] [Rank 0] step:2441/10000 train_time:113492ms step_avg:46.49ms +[2025-09-05 17:16:22] [Rank 0] step:2461/10000 train_time:114151ms step_avg:46.38ms +[2025-09-05 17:16:22] [Rank 0] step:2461/10000 train_time:114151ms step_avg:46.38ms +[2025-09-05 17:16:23] [Rank 0] step:2481/10000 train_time:114810ms step_avg:46.28ms +[2025-09-05 17:16:23] [Rank 0] step:2481/10000 
train_time:114810ms step_avg:46.28ms +[2025-09-05 17:16:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:16:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:16:24] [Rank 0] PRINT: step:2500/10000 train_loss:1.0738 val_loss:1.0044 train_time:115703ms step_avg:46.28ms +[2025-09-05 17:16:24] [Rank 0] PRINT: step:2500/10000 train_loss:1.0738 val_loss:1.0044 train_time:115703ms step_avg:46.28ms +[2025-09-05 17:16:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:16:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:16:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:16:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:17:46] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:17:46] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:17:46] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:17:46] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:17:46] [Rank 0] Total Loss: 4.2424 +[2025-09-05 17:17:46] [Rank 0] Total Loss: 4.2424 +[2025-09-05 17:17:46] [Rank 0] Total FTA (Unweighted): 0.5313 +[2025-09-05 17:17:46] [Rank 0] Total FTA (Unweighted): 0.5313 +[2025-09-05 17:17:46] [Rank 0] Total FTA (Weighted): 0.5312 +[2025-09-05 17:17:46] [Rank 0] Total FTA (Weighted): 0.5312 +[2025-09-05 17:17:46] [Rank 0] Group 0 Loss: 3.9531 +[2025-09-05 17:17:46] [Rank 0] Group 0 Loss: 3.9531 +[2025-09-05 17:17:46] [Rank 0] Group 1 Loss: 3.6362 +[2025-09-05 17:17:46] [Rank 0] Group 1 Loss: 3.6362 +[2025-09-05 17:17:46] [Rank 0] Group 2 Loss: 3.6130 +[2025-09-05 17:17:46] [Rank 0] Group 2 Loss: 3.6130 +[2025-09-05 17:17:46] [Rank 0] Group 3 Loss: 3.8729 +[2025-09-05 17:17:46] [Rank 0] Group 3 Loss: 3.8729 +[2025-09-05 17:17:46] [Rank 0] Group 4 Loss: 3.8591 +[2025-09-05 17:17:46] [Rank 0] Group 4 Loss: 3.8591 +[2025-09-05 17:17:46] [Rank 0] Group 5 Loss: 3.9705 +[2025-09-05 17:17:46] [Rank 0] Group 5 Loss: 3.9705 +[2025-09-05 17:17:46] [Rank 0] Group 6 Loss: 3.9311 +[2025-09-05 17:17:46] [Rank 0] Group 6 Loss: 3.9311 +[2025-09-05 17:17:46] [Rank 0] Group 7 Loss: 4.0791 +[2025-09-05 17:17:46] [Rank 0] Group 7 Loss: 4.0791 +[2025-09-05 17:17:46] [Rank 0] Group 8 Loss: 4.2678 +[2025-09-05 17:17:46] [Rank 0] Group 8 Loss: 4.2678 +[2025-09-05 17:17:46] [Rank 0] Group 9 Loss: 4.3823 +[2025-09-05 17:17:46] [Rank 0] Group 9 Loss: 4.3823 +[2025-09-05 17:17:46] [Rank 0] Group 10 Loss: 4.5479 +[2025-09-05 17:17:46] [Rank 0] Group 10 Loss: 4.5479 +[2025-09-05 17:17:46] [Rank 0] Group 11 Loss: 4.5992 +[2025-09-05 17:17:46] [Rank 0] Group 11 Loss: 4.5992 +[2025-09-05 17:17:46] [Rank 0] Group 12 Loss: 4.6617 +[2025-09-05 17:17:46] [Rank 0] Group 12 Loss: 4.6617 +[2025-09-05 17:17:46] [Rank 0] Group 13 Loss: 4.8358 +[2025-09-05 17:17:46] [Rank 0] Group 13 Loss: 4.8358 +[2025-09-05 17:17:46] [Rank 0] Group 14 Loss: 4.7975 +[2025-09-05 17:17:46] [Rank 0] Group 14 Loss: 4.7975 +[2025-09-05 17:17:46] [Rank 0] Group 15 Loss: 4.8707 +[2025-09-05 17:17:46] [Rank 0] Group 15 Loss: 4.8707 +[2025-09-05 17:17:46] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:17:46] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:17:47] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:17:47] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:17:47] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:17:47] 
[Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:17:47] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:17:47] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:17:47] [Rank 0] Group 4 FTA: 0.9700 +[2025-09-05 17:17:47] [Rank 0] Group 4 FTA: 0.9700 +[2025-09-05 17:17:47] [Rank 0] Group 5 FTA: 0.7100 +[2025-09-05 17:17:47] [Rank 0] Group 5 FTA: 0.7100 +[2025-09-05 17:17:47] [Rank 0] Group 6 FTA: 0.5700 +[2025-09-05 17:17:47] [Rank 0] Group 6 FTA: 0.5700 +[2025-09-05 17:17:47] [Rank 0] Group 7 FTA: 0.5000 +[2025-09-05 17:17:47] [Rank 0] Group 7 FTA: 0.5000 +[2025-09-05 17:17:47] [Rank 0] Group 8 FTA: 0.5500 +[2025-09-05 17:17:47] [Rank 0] Group 8 FTA: 0.5500 +[2025-09-05 17:17:47] [Rank 0] Group 9 FTA: 0.3400 +[2025-09-05 17:17:47] [Rank 0] Group 9 FTA: 0.3400 +[2025-09-05 17:17:47] [Rank 0] Group 10 FTA: 0.3000 +[2025-09-05 17:17:47] [Rank 0] Group 10 FTA: 0.3000 +[2025-09-05 17:17:47] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 17:17:47] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 17:17:47] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 17:17:47] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 17:17:47] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 17:17:47] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 17:17:47] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 17:17:47] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 17:17:47] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 17:17:47] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 17:17:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:17:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:17:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:17:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:17:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:17:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:17:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:17:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:17:48] [Rank 0] step:2501/10000 train_time:115712ms step_avg:46.27ms +[2025-09-05 17:17:48] [Rank 0] step:2501/10000 train_time:115712ms step_avg:46.27ms +[2025-09-05 17:17:49] [Rank 0] step:2521/10000 train_time:116148ms step_avg:46.07ms +[2025-09-05 17:17:49] [Rank 0] step:2521/10000 train_time:116148ms step_avg:46.07ms +[2025-09-05 17:17:49] [Rank 0] step:2541/10000 train_time:116805ms step_avg:45.97ms +[2025-09-05 17:17:49] [Rank 0] step:2541/10000 train_time:116805ms step_avg:45.97ms +[2025-09-05 17:17:50] [Rank 0] step:2561/10000 train_time:117462ms step_avg:45.87ms +[2025-09-05 17:17:50] [Rank 0] step:2561/10000 train_time:117462ms step_avg:45.87ms +[2025-09-05 17:17:51] [Rank 0] step:2581/10000 train_time:118119ms 
step_avg:45.76ms +[2025-09-05 17:17:51] [Rank 0] step:2581/10000 train_time:118119ms step_avg:45.76ms +[2025-09-05 17:17:51] [Rank 0] step:2601/10000 train_time:118776ms step_avg:45.67ms +[2025-09-05 17:17:51] [Rank 0] step:2601/10000 train_time:118776ms step_avg:45.67ms +[2025-09-05 17:17:52] [Rank 0] step:2621/10000 train_time:119433ms step_avg:45.57ms +[2025-09-05 17:17:52] [Rank 0] step:2621/10000 train_time:119433ms step_avg:45.57ms +[2025-09-05 17:17:53] [Rank 0] step:2641/10000 train_time:120089ms step_avg:45.47ms +[2025-09-05 17:17:53] [Rank 0] step:2641/10000 train_time:120089ms step_avg:45.47ms +[2025-09-05 17:17:53] [Rank 0] step:2661/10000 train_time:120746ms step_avg:45.38ms +[2025-09-05 17:17:53] [Rank 0] step:2661/10000 train_time:120746ms step_avg:45.38ms +[2025-09-05 17:17:54] [Rank 0] step:2681/10000 train_time:121404ms step_avg:45.28ms +[2025-09-05 17:17:54] [Rank 0] step:2681/10000 train_time:121404ms step_avg:45.28ms +[2025-09-05 17:17:55] [Rank 0] step:2701/10000 train_time:122060ms step_avg:45.19ms +[2025-09-05 17:17:55] [Rank 0] step:2701/10000 train_time:122060ms step_avg:45.19ms +[2025-09-05 17:17:55] [Rank 0] step:2721/10000 train_time:122717ms step_avg:45.10ms +[2025-09-05 17:17:55] [Rank 0] step:2721/10000 train_time:122717ms step_avg:45.10ms +[2025-09-05 17:17:56] [Rank 0] step:2741/10000 train_time:123374ms step_avg:45.01ms +[2025-09-05 17:17:56] [Rank 0] step:2741/10000 train_time:123374ms step_avg:45.01ms +[2025-09-05 17:17:57] [Rank 0] step:2761/10000 train_time:124031ms step_avg:44.92ms +[2025-09-05 17:17:57] [Rank 0] step:2761/10000 train_time:124031ms step_avg:44.92ms +[2025-09-05 17:17:57] [Rank 0] step:2781/10000 train_time:124688ms step_avg:44.84ms +[2025-09-05 17:17:57] [Rank 0] step:2781/10000 train_time:124688ms step_avg:44.84ms +[2025-09-05 17:17:58] [Rank 0] step:2801/10000 train_time:125345ms step_avg:44.75ms +[2025-09-05 17:17:58] [Rank 0] step:2801/10000 train_time:125345ms step_avg:44.75ms +[2025-09-05 17:17:59] [Rank 0] step:2821/10000 train_time:126629ms step_avg:44.89ms +[2025-09-05 17:17:59] [Rank 0] step:2821/10000 train_time:126629ms step_avg:44.89ms +[2025-09-05 17:18:00] [Rank 0] step:2841/10000 train_time:127121ms step_avg:44.75ms +[2025-09-05 17:18:00] [Rank 0] step:2841/10000 train_time:127121ms step_avg:44.75ms +[2025-09-05 17:18:00] [Rank 0] step:2861/10000 train_time:127777ms step_avg:44.66ms +[2025-09-05 17:18:00] [Rank 0] step:2861/10000 train_time:127777ms step_avg:44.66ms +[2025-09-05 17:18:01] [Rank 0] step:2881/10000 train_time:128435ms step_avg:44.58ms +[2025-09-05 17:18:01] [Rank 0] step:2881/10000 train_time:128435ms step_avg:44.58ms +[2025-09-05 17:18:02] [Rank 0] step:2901/10000 train_time:129092ms step_avg:44.50ms +[2025-09-05 17:18:02] [Rank 0] step:2901/10000 train_time:129092ms step_avg:44.50ms +[2025-09-05 17:18:02] [Rank 0] step:2921/10000 train_time:129749ms step_avg:44.42ms +[2025-09-05 17:18:02] [Rank 0] step:2921/10000 train_time:129749ms step_avg:44.42ms +[2025-09-05 17:18:03] [Rank 0] step:2941/10000 train_time:130406ms step_avg:44.34ms +[2025-09-05 17:18:03] [Rank 0] step:2941/10000 train_time:130406ms step_avg:44.34ms +[2025-09-05 17:18:04] [Rank 0] step:2961/10000 train_time:131063ms step_avg:44.26ms +[2025-09-05 17:18:04] [Rank 0] step:2961/10000 train_time:131063ms step_avg:44.26ms +[2025-09-05 17:18:04] [Rank 0] step:2981/10000 train_time:131722ms step_avg:44.19ms +[2025-09-05 17:18:04] [Rank 0] step:2981/10000 train_time:131722ms step_avg:44.19ms +[2025-09-05 17:18:05] [Rank 0] PRINT: Warning: 
val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:18:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:18:05] [Rank 0] PRINT: step:3000/10000 train_loss:0.9706 val_loss:0.9256 train_time:132612ms step_avg:44.20ms +[2025-09-05 17:18:05] [Rank 0] PRINT: step:3000/10000 train_loss:0.9706 val_loss:0.9256 train_time:132612ms step_avg:44.20ms +[2025-09-05 17:18:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:18:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:18:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:18:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:19:28] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:19:28] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:19:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:19:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:19:28] [Rank 0] Total Loss: 4.3247 +[2025-09-05 17:19:28] [Rank 0] Total Loss: 4.3247 +[2025-09-05 17:19:28] [Rank 0] Total FTA (Unweighted): 0.6100 +[2025-09-05 17:19:28] [Rank 0] Total FTA (Unweighted): 0.6100 +[2025-09-05 17:19:28] [Rank 0] Total FTA (Weighted): 0.6100 +[2025-09-05 17:19:28] [Rank 0] Total FTA (Weighted): 0.6100 +[2025-09-05 17:19:28] [Rank 0] Group 0 Loss: 4.0254 +[2025-09-05 17:19:28] [Rank 0] Group 0 Loss: 4.0254 +[2025-09-05 17:19:28] [Rank 0] Group 1 Loss: 3.7718 +[2025-09-05 17:19:28] [Rank 0] Group 1 Loss: 3.7718 +[2025-09-05 17:19:28] [Rank 0] Group 2 Loss: 3.7510 +[2025-09-05 17:19:28] [Rank 0] Group 2 Loss: 3.7510 +[2025-09-05 17:19:28] [Rank 0] Group 3 Loss: 4.0549 +[2025-09-05 17:19:28] [Rank 0] Group 3 Loss: 4.0549 +[2025-09-05 17:19:28] [Rank 0] Group 4 Loss: 4.0123 +[2025-09-05 17:19:28] [Rank 0] Group 4 Loss: 4.0123 +[2025-09-05 17:19:28] [Rank 0] Group 5 Loss: 4.0673 +[2025-09-05 17:19:28] [Rank 0] Group 5 Loss: 4.0673 +[2025-09-05 17:19:28] [Rank 0] Group 6 Loss: 4.0302 +[2025-09-05 17:19:28] [Rank 0] Group 6 Loss: 4.0302 +[2025-09-05 17:19:28] [Rank 0] Group 7 Loss: 4.1150 +[2025-09-05 17:19:28] [Rank 0] Group 7 Loss: 4.1150 +[2025-09-05 17:19:28] [Rank 0] Group 8 Loss: 4.3049 +[2025-09-05 17:19:28] [Rank 0] Group 8 Loss: 4.3049 +[2025-09-05 17:19:28] [Rank 0] Group 9 Loss: 4.3821 +[2025-09-05 17:19:28] [Rank 0] Group 9 Loss: 4.3821 +[2025-09-05 17:19:28] [Rank 0] Group 10 Loss: 4.6233 +[2025-09-05 17:19:28] [Rank 0] Group 10 Loss: 4.6233 +[2025-09-05 17:19:28] [Rank 0] Group 11 Loss: 4.6498 +[2025-09-05 17:19:28] [Rank 0] Group 11 Loss: 4.6498 +[2025-09-05 17:19:28] [Rank 0] Group 12 Loss: 4.7181 +[2025-09-05 17:19:28] [Rank 0] Group 12 Loss: 4.7181 +[2025-09-05 17:19:28] [Rank 0] Group 13 Loss: 4.8384 +[2025-09-05 17:19:28] [Rank 0] Group 13 Loss: 4.8384 +[2025-09-05 17:19:28] [Rank 0] Group 14 Loss: 4.8874 +[2025-09-05 17:19:28] [Rank 0] Group 14 Loss: 4.8874 +[2025-09-05 17:19:28] [Rank 0] Group 15 Loss: 4.9637 +[2025-09-05 17:19:28] [Rank 0] Group 15 Loss: 4.9637 +[2025-09-05 17:19:28] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 3 FTA: 1.0000 
+[2025-09-05 17:19:28] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:19:28] [Rank 0] Group 5 FTA: 0.9400 +[2025-09-05 17:19:28] [Rank 0] Group 5 FTA: 0.9400 +[2025-09-05 17:19:28] [Rank 0] Group 6 FTA: 0.6700 +[2025-09-05 17:19:28] [Rank 0] Group 6 FTA: 0.6700 +[2025-09-05 17:19:28] [Rank 0] Group 7 FTA: 0.6200 +[2025-09-05 17:19:28] [Rank 0] Group 7 FTA: 0.6200 +[2025-09-05 17:19:28] [Rank 0] Group 8 FTA: 0.7000 +[2025-09-05 17:19:28] [Rank 0] Group 8 FTA: 0.7000 +[2025-09-05 17:19:28] [Rank 0] Group 9 FTA: 0.4900 +[2025-09-05 17:19:28] [Rank 0] Group 9 FTA: 0.4900 +[2025-09-05 17:19:28] [Rank 0] Group 10 FTA: 0.5300 +[2025-09-05 17:19:28] [Rank 0] Group 10 FTA: 0.5300 +[2025-09-05 17:19:28] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 17:19:28] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 17:19:28] [Rank 0] Group 12 FTA: 0.1500 +[2025-09-05 17:19:28] [Rank 0] Group 12 FTA: 0.1500 +[2025-09-05 17:19:28] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 17:19:28] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 17:19:28] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 17:19:28] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 17:19:28] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 17:19:28] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 17:19:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:19:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:19:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:19:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:19:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:19:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:19:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:19:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:19:29] [Rank 0] step:3001/10000 train_time:132621ms step_avg:44.19ms +[2025-09-05 17:19:29] [Rank 0] step:3001/10000 train_time:132621ms step_avg:44.19ms +[2025-09-05 17:19:30] [Rank 0] step:3021/10000 train_time:133066ms step_avg:44.05ms +[2025-09-05 17:19:30] [Rank 0] step:3021/10000 train_time:133066ms step_avg:44.05ms +[2025-09-05 17:19:31] [Rank 0] step:3041/10000 train_time:133725ms step_avg:43.97ms +[2025-09-05 17:19:31] [Rank 0] step:3041/10000 train_time:133725ms step_avg:43.97ms +[2025-09-05 17:19:31] [Rank 0] step:3061/10000 train_time:134383ms step_avg:43.90ms +[2025-09-05 17:19:31] [Rank 0] step:3061/10000 train_time:134383ms step_avg:43.90ms +[2025-09-05 17:19:32] [Rank 0] step:3081/10000 train_time:135040ms step_avg:43.83ms +[2025-09-05 17:19:32] [Rank 0] step:3081/10000 train_time:135040ms 
step_avg:43.83ms +[2025-09-05 17:19:33] [Rank 0] step:3101/10000 train_time:135699ms step_avg:43.76ms +[2025-09-05 17:19:33] [Rank 0] step:3101/10000 train_time:135699ms step_avg:43.76ms +[2025-09-05 17:19:33] [Rank 0] step:3121/10000 train_time:136356ms step_avg:43.69ms +[2025-09-05 17:19:33] [Rank 0] step:3121/10000 train_time:136356ms step_avg:43.69ms +[2025-09-05 17:19:34] [Rank 0] step:3141/10000 train_time:137014ms step_avg:43.62ms +[2025-09-05 17:19:34] [Rank 0] step:3141/10000 train_time:137014ms step_avg:43.62ms +[2025-09-05 17:19:35] [Rank 0] step:3161/10000 train_time:137672ms step_avg:43.55ms +[2025-09-05 17:19:35] [Rank 0] step:3161/10000 train_time:137672ms step_avg:43.55ms +[2025-09-05 17:19:35] [Rank 0] step:3181/10000 train_time:138330ms step_avg:43.49ms +[2025-09-05 17:19:35] [Rank 0] step:3181/10000 train_time:138330ms step_avg:43.49ms +[2025-09-05 17:19:36] [Rank 0] step:3201/10000 train_time:138987ms step_avg:43.42ms +[2025-09-05 17:19:36] [Rank 0] step:3201/10000 train_time:138987ms step_avg:43.42ms +[2025-09-05 17:19:37] [Rank 0] step:3221/10000 train_time:139645ms step_avg:43.35ms +[2025-09-05 17:19:37] [Rank 0] step:3221/10000 train_time:139645ms step_avg:43.35ms +[2025-09-05 17:19:37] [Rank 0] step:3241/10000 train_time:140303ms step_avg:43.29ms +[2025-09-05 17:19:37] [Rank 0] step:3241/10000 train_time:140303ms step_avg:43.29ms +[2025-09-05 17:19:38] [Rank 0] step:3261/10000 train_time:140962ms step_avg:43.23ms +[2025-09-05 17:19:38] [Rank 0] step:3261/10000 train_time:140962ms step_avg:43.23ms +[2025-09-05 17:19:39] [Rank 0] step:3281/10000 train_time:141620ms step_avg:43.16ms +[2025-09-05 17:19:39] [Rank 0] step:3281/10000 train_time:141620ms step_avg:43.16ms +[2025-09-05 17:19:39] [Rank 0] step:3301/10000 train_time:142279ms step_avg:43.10ms +[2025-09-05 17:19:39] [Rank 0] step:3301/10000 train_time:142279ms step_avg:43.10ms +[2025-09-05 17:19:40] [Rank 0] step:3321/10000 train_time:142943ms step_avg:43.04ms +[2025-09-05 17:19:40] [Rank 0] step:3321/10000 train_time:142943ms step_avg:43.04ms +[2025-09-05 17:19:41] [Rank 0] step:3341/10000 train_time:143598ms step_avg:42.98ms +[2025-09-05 17:19:41] [Rank 0] step:3341/10000 train_time:143598ms step_avg:42.98ms +[2025-09-05 17:19:41] [Rank 0] step:3361/10000 train_time:144255ms step_avg:42.92ms +[2025-09-05 17:19:41] [Rank 0] step:3361/10000 train_time:144255ms step_avg:42.92ms +[2025-09-05 17:19:42] [Rank 0] step:3381/10000 train_time:144913ms step_avg:42.86ms +[2025-09-05 17:19:42] [Rank 0] step:3381/10000 train_time:144913ms step_avg:42.86ms +[2025-09-05 17:19:43] [Rank 0] step:3401/10000 train_time:145571ms step_avg:42.80ms +[2025-09-05 17:19:43] [Rank 0] step:3401/10000 train_time:145571ms step_avg:42.80ms +[2025-09-05 17:19:43] [Rank 0] step:3421/10000 train_time:146229ms step_avg:42.74ms +[2025-09-05 17:19:43] [Rank 0] step:3421/10000 train_time:146229ms step_avg:42.74ms +[2025-09-05 17:19:44] [Rank 0] step:3441/10000 train_time:146887ms step_avg:42.69ms +[2025-09-05 17:19:44] [Rank 0] step:3441/10000 train_time:146887ms step_avg:42.69ms +[2025-09-05 17:19:45] [Rank 0] step:3461/10000 train_time:147545ms step_avg:42.63ms +[2025-09-05 17:19:45] [Rank 0] step:3461/10000 train_time:147545ms step_avg:42.63ms +[2025-09-05 17:19:45] [Rank 0] step:3481/10000 train_time:148208ms step_avg:42.58ms +[2025-09-05 17:19:45] [Rank 0] step:3481/10000 train_time:148208ms step_avg:42.58ms +[2025-09-05 17:19:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). 
Some tokens might be missed. +[2025-09-05 17:19:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:19:46] [Rank 0] PRINT: step:3500/10000 train_loss:0.9070 val_loss:0.8752 train_time:149096ms step_avg:42.60ms +[2025-09-05 17:19:46] [Rank 0] PRINT: step:3500/10000 train_loss:0.9070 val_loss:0.8752 train_time:149096ms step_avg:42.60ms +[2025-09-05 17:19:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:19:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:19:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:19:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:21:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:21:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:21:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:21:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:21:08] [Rank 0] Total Loss: 4.5238 +[2025-09-05 17:21:08] [Rank 0] Total Loss: 4.5238 +[2025-09-05 17:21:08] [Rank 0] Total FTA (Unweighted): 0.6475 +[2025-09-05 17:21:08] [Rank 0] Total FTA (Unweighted): 0.6475 +[2025-09-05 17:21:08] [Rank 0] Total FTA (Weighted): 0.6475 +[2025-09-05 17:21:08] [Rank 0] Total FTA (Weighted): 0.6475 +[2025-09-05 17:21:08] [Rank 0] Group 0 Loss: 4.2973 +[2025-09-05 17:21:08] [Rank 0] Group 0 Loss: 4.2973 +[2025-09-05 17:21:08] [Rank 0] Group 1 Loss: 4.0226 +[2025-09-05 17:21:08] [Rank 0] Group 1 Loss: 4.0226 +[2025-09-05 17:21:08] [Rank 0] Group 2 Loss: 3.9753 +[2025-09-05 17:21:08] [Rank 0] Group 2 Loss: 3.9753 +[2025-09-05 17:21:08] [Rank 0] Group 3 Loss: 4.3421 +[2025-09-05 17:21:08] [Rank 0] Group 3 Loss: 4.3421 +[2025-09-05 17:21:08] [Rank 0] Group 4 Loss: 4.3036 +[2025-09-05 17:21:08] [Rank 0] Group 4 Loss: 4.3036 +[2025-09-05 17:21:08] [Rank 0] Group 5 Loss: 4.2798 +[2025-09-05 17:21:08] [Rank 0] Group 5 Loss: 4.2798 +[2025-09-05 17:21:08] [Rank 0] Group 6 Loss: 4.2259 +[2025-09-05 17:21:08] [Rank 0] Group 6 Loss: 4.2259 +[2025-09-05 17:21:08] [Rank 0] Group 7 Loss: 4.3214 +[2025-09-05 17:21:08] [Rank 0] Group 7 Loss: 4.3214 +[2025-09-05 17:21:08] [Rank 0] Group 8 Loss: 4.4497 +[2025-09-05 17:21:08] [Rank 0] Group 8 Loss: 4.4497 +[2025-09-05 17:21:08] [Rank 0] Group 9 Loss: 4.5277 +[2025-09-05 17:21:08] [Rank 0] Group 9 Loss: 4.5277 +[2025-09-05 17:21:08] [Rank 0] Group 10 Loss: 4.7364 +[2025-09-05 17:21:08] [Rank 0] Group 10 Loss: 4.7364 +[2025-09-05 17:21:08] [Rank 0] Group 11 Loss: 4.7979 +[2025-09-05 17:21:08] [Rank 0] Group 11 Loss: 4.7979 +[2025-09-05 17:21:08] [Rank 0] Group 12 Loss: 4.9191 +[2025-09-05 17:21:08] [Rank 0] Group 12 Loss: 4.9191 +[2025-09-05 17:21:08] [Rank 0] Group 13 Loss: 5.0377 +[2025-09-05 17:21:08] [Rank 0] Group 13 Loss: 5.0377 +[2025-09-05 17:21:09] [Rank 0] Group 14 Loss: 5.0250 +[2025-09-05 17:21:09] [Rank 0] Group 14 Loss: 5.0250 +[2025-09-05 17:21:09] [Rank 0] Group 15 Loss: 5.1189 +[2025-09-05 17:21:09] [Rank 0] Group 15 Loss: 5.1189 +[2025-09-05 17:21:09] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:21:09] 
[Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:21:09] [Rank 0] Group 5 FTA: 0.9900 +[2025-09-05 17:21:09] [Rank 0] Group 5 FTA: 0.9900 +[2025-09-05 17:21:09] [Rank 0] Group 6 FTA: 0.8100 +[2025-09-05 17:21:09] [Rank 0] Group 6 FTA: 0.8100 +[2025-09-05 17:21:09] [Rank 0] Group 7 FTA: 0.6900 +[2025-09-05 17:21:09] [Rank 0] Group 7 FTA: 0.6900 +[2025-09-05 17:21:09] [Rank 0] Group 8 FTA: 0.7500 +[2025-09-05 17:21:09] [Rank 0] Group 8 FTA: 0.7500 +[2025-09-05 17:21:09] [Rank 0] Group 9 FTA: 0.5900 +[2025-09-05 17:21:09] [Rank 0] Group 9 FTA: 0.5900 +[2025-09-05 17:21:09] [Rank 0] Group 10 FTA: 0.5900 +[2025-09-05 17:21:09] [Rank 0] Group 10 FTA: 0.5900 +[2025-09-05 17:21:09] [Rank 0] Group 11 FTA: 0.3700 +[2025-09-05 17:21:09] [Rank 0] Group 11 FTA: 0.3700 +[2025-09-05 17:21:09] [Rank 0] Group 12 FTA: 0.1600 +[2025-09-05 17:21:09] [Rank 0] Group 12 FTA: 0.1600 +[2025-09-05 17:21:09] [Rank 0] Group 13 FTA: 0.1700 +[2025-09-05 17:21:09] [Rank 0] Group 13 FTA: 0.1700 +[2025-09-05 17:21:09] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-05 17:21:09] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-05 17:21:09] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:21:09] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:21:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:21:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:21:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:21:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:21:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:21:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:21:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:21:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:21:10] [Rank 0] step:3501/10000 train_time:149105ms step_avg:42.59ms +[2025-09-05 17:21:10] [Rank 0] step:3501/10000 train_time:149105ms step_avg:42.59ms +[2025-09-05 17:21:11] [Rank 0] step:3521/10000 train_time:149559ms step_avg:42.48ms +[2025-09-05 17:21:11] [Rank 0] step:3521/10000 train_time:149559ms step_avg:42.48ms +[2025-09-05 17:21:12] [Rank 0] step:3541/10000 train_time:150216ms step_avg:42.42ms +[2025-09-05 17:21:12] [Rank 0] step:3541/10000 train_time:150216ms step_avg:42.42ms +[2025-09-05 17:21:12] [Rank 0] step:3561/10000 train_time:150874ms step_avg:42.37ms +[2025-09-05 17:21:12] [Rank 0] step:3561/10000 train_time:150874ms step_avg:42.37ms +[2025-09-05 17:21:13] [Rank 0] step:3581/10000 train_time:151531ms step_avg:42.32ms +[2025-09-05 17:21:13] [Rank 0] step:3581/10000 train_time:151531ms step_avg:42.32ms +[2025-09-05 17:21:14] [Rank 0] step:3601/10000 train_time:152189ms 
step_avg:42.26ms +[2025-09-05 17:21:14] [Rank 0] step:3601/10000 train_time:152189ms step_avg:42.26ms +[2025-09-05 17:21:14] [Rank 0] step:3621/10000 train_time:152846ms step_avg:42.21ms +[2025-09-05 17:21:14] [Rank 0] step:3621/10000 train_time:152846ms step_avg:42.21ms +[2025-09-05 17:21:15] [Rank 0] step:3641/10000 train_time:153568ms step_avg:42.18ms +[2025-09-05 17:21:15] [Rank 0] step:3641/10000 train_time:153568ms step_avg:42.18ms +[2025-09-05 17:21:16] [Rank 0] step:3661/10000 train_time:154225ms step_avg:42.13ms +[2025-09-05 17:21:16] [Rank 0] step:3661/10000 train_time:154225ms step_avg:42.13ms +[2025-09-05 17:21:16] [Rank 0] step:3681/10000 train_time:154882ms step_avg:42.08ms +[2025-09-05 17:21:16] [Rank 0] step:3681/10000 train_time:154882ms step_avg:42.08ms +[2025-09-05 17:21:17] [Rank 0] step:3701/10000 train_time:155539ms step_avg:42.03ms +[2025-09-05 17:21:17] [Rank 0] step:3701/10000 train_time:155539ms step_avg:42.03ms +[2025-09-05 17:21:18] [Rank 0] step:3721/10000 train_time:156197ms step_avg:41.98ms +[2025-09-05 17:21:18] [Rank 0] step:3721/10000 train_time:156197ms step_avg:41.98ms +[2025-09-05 17:21:18] [Rank 0] step:3741/10000 train_time:156854ms step_avg:41.93ms +[2025-09-05 17:21:18] [Rank 0] step:3741/10000 train_time:156854ms step_avg:41.93ms +[2025-09-05 17:21:19] [Rank 0] step:3761/10000 train_time:157511ms step_avg:41.88ms +[2025-09-05 17:21:19] [Rank 0] step:3761/10000 train_time:157511ms step_avg:41.88ms +[2025-09-05 17:21:20] [Rank 0] step:3781/10000 train_time:158169ms step_avg:41.83ms +[2025-09-05 17:21:20] [Rank 0] step:3781/10000 train_time:158169ms step_avg:41.83ms +[2025-09-05 17:21:20] [Rank 0] step:3801/10000 train_time:158827ms step_avg:41.79ms +[2025-09-05 17:21:20] [Rank 0] step:3801/10000 train_time:158827ms step_avg:41.79ms +[2025-09-05 17:21:21] [Rank 0] step:3821/10000 train_time:159485ms step_avg:41.74ms +[2025-09-05 17:21:21] [Rank 0] step:3821/10000 train_time:159485ms step_avg:41.74ms +[2025-09-05 17:21:22] [Rank 0] step:3841/10000 train_time:160142ms step_avg:41.69ms +[2025-09-05 17:21:22] [Rank 0] step:3841/10000 train_time:160142ms step_avg:41.69ms +[2025-09-05 17:21:22] [Rank 0] step:3861/10000 train_time:160799ms step_avg:41.65ms +[2025-09-05 17:21:22] [Rank 0] step:3861/10000 train_time:160799ms step_avg:41.65ms +[2025-09-05 17:21:23] [Rank 0] step:3881/10000 train_time:161456ms step_avg:41.60ms +[2025-09-05 17:21:23] [Rank 0] step:3881/10000 train_time:161456ms step_avg:41.60ms +[2025-09-05 17:21:24] [Rank 0] step:3901/10000 train_time:162112ms step_avg:41.56ms +[2025-09-05 17:21:24] [Rank 0] step:3901/10000 train_time:162112ms step_avg:41.56ms +[2025-09-05 17:21:24] [Rank 0] step:3921/10000 train_time:162769ms step_avg:41.51ms +[2025-09-05 17:21:24] [Rank 0] step:3921/10000 train_time:162769ms step_avg:41.51ms +[2025-09-05 17:21:25] [Rank 0] step:3941/10000 train_time:163427ms step_avg:41.47ms +[2025-09-05 17:21:25] [Rank 0] step:3941/10000 train_time:163427ms step_avg:41.47ms +[2025-09-05 17:21:26] [Rank 0] step:3961/10000 train_time:164084ms step_avg:41.42ms +[2025-09-05 17:21:26] [Rank 0] step:3961/10000 train_time:164084ms step_avg:41.42ms +[2025-09-05 17:21:26] [Rank 0] step:3981/10000 train_time:164742ms step_avg:41.38ms +[2025-09-05 17:21:26] [Rank 0] step:3981/10000 train_time:164742ms step_avg:41.38ms +[2025-09-05 17:21:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 17:21:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:21:27] [Rank 0] PRINT: step:4000/10000 train_loss:0.8636 val_loss:0.8377 train_time:165633ms step_avg:41.41ms +[2025-09-05 17:21:27] [Rank 0] PRINT: step:4000/10000 train_loss:0.8636 val_loss:0.8377 train_time:165633ms step_avg:41.41ms +[2025-09-05 17:21:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:21:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:21:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:21:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:22:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:22:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:22:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:22:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:22:50] [Rank 0] Total Loss: 4.6099 +[2025-09-05 17:22:50] [Rank 0] Total Loss: 4.6099 +[2025-09-05 17:22:50] [Rank 0] Total FTA (Unweighted): 0.6862 +[2025-09-05 17:22:50] [Rank 0] Total FTA (Unweighted): 0.6862 +[2025-09-05 17:22:50] [Rank 0] Total FTA (Weighted): 0.6863 +[2025-09-05 17:22:50] [Rank 0] Total FTA (Weighted): 0.6863 +[2025-09-05 17:22:50] [Rank 0] Group 0 Loss: 4.3137 +[2025-09-05 17:22:50] [Rank 0] Group 0 Loss: 4.3137 +[2025-09-05 17:22:50] [Rank 0] Group 1 Loss: 4.2139 +[2025-09-05 17:22:50] [Rank 0] Group 1 Loss: 4.2139 +[2025-09-05 17:22:50] [Rank 0] Group 2 Loss: 4.0706 +[2025-09-05 17:22:50] [Rank 0] Group 2 Loss: 4.0706 +[2025-09-05 17:22:50] [Rank 0] Group 3 Loss: 4.3661 +[2025-09-05 17:22:50] [Rank 0] Group 3 Loss: 4.3661 +[2025-09-05 17:22:50] [Rank 0] Group 4 Loss: 4.4108 +[2025-09-05 17:22:50] [Rank 0] Group 4 Loss: 4.4108 +[2025-09-05 17:22:50] [Rank 0] Group 5 Loss: 4.3852 +[2025-09-05 17:22:50] [Rank 0] Group 5 Loss: 4.3852 +[2025-09-05 17:22:50] [Rank 0] Group 6 Loss: 4.3581 +[2025-09-05 17:22:50] [Rank 0] Group 6 Loss: 4.3581 +[2025-09-05 17:22:50] [Rank 0] Group 7 Loss: 4.3826 +[2025-09-05 17:22:50] [Rank 0] Group 7 Loss: 4.3826 +[2025-09-05 17:22:50] [Rank 0] Group 8 Loss: 4.5585 +[2025-09-05 17:22:50] [Rank 0] Group 8 Loss: 4.5585 +[2025-09-05 17:22:50] [Rank 0] Group 9 Loss: 4.6007 +[2025-09-05 17:22:50] [Rank 0] Group 9 Loss: 4.6007 +[2025-09-05 17:22:50] [Rank 0] Group 10 Loss: 4.8290 +[2025-09-05 17:22:50] [Rank 0] Group 10 Loss: 4.8290 +[2025-09-05 17:22:50] [Rank 0] Group 11 Loss: 4.8455 +[2025-09-05 17:22:50] [Rank 0] Group 11 Loss: 4.8455 +[2025-09-05 17:22:50] [Rank 0] Group 12 Loss: 4.9574 +[2025-09-05 17:22:50] [Rank 0] Group 12 Loss: 4.9574 +[2025-09-05 17:22:50] [Rank 0] Group 13 Loss: 5.1040 +[2025-09-05 17:22:50] [Rank 0] Group 13 Loss: 5.1040 +[2025-09-05 17:22:50] [Rank 0] Group 14 Loss: 5.1239 +[2025-09-05 17:22:50] [Rank 0] Group 14 Loss: 5.1239 +[2025-09-05 17:22:50] [Rank 0] Group 15 Loss: 5.2387 +[2025-09-05 17:22:50] [Rank 0] Group 15 Loss: 5.2387 +[2025-09-05 17:22:50] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 17:22:50] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 17:22:50] [Rank 0] Group 6 FTA: 0.9700 +[2025-09-05 17:22:50] [Rank 0] Group 6 FTA: 0.9700 +[2025-09-05 17:22:50] [Rank 0] Group 7 FTA: 0.7500 +[2025-09-05 17:22:50] [Rank 0] Group 7 FTA: 0.7500 +[2025-09-05 17:22:50] [Rank 0] Group 8 FTA: 0.7500 +[2025-09-05 17:22:50] [Rank 0] Group 8 FTA: 0.7500 +[2025-09-05 17:22:50] [Rank 0] Group 9 FTA: 0.6700 +[2025-09-05 17:22:50] [Rank 0] Group 9 FTA: 0.6700 +[2025-09-05 17:22:50] [Rank 0] Group 10 FTA: 0.6700 +[2025-09-05 17:22:50] [Rank 0] Group 10 FTA: 0.6700 +[2025-09-05 17:22:50] [Rank 0] Group 11 FTA: 0.5000 +[2025-09-05 17:22:50] [Rank 0] Group 11 FTA: 0.5000 +[2025-09-05 17:22:50] [Rank 0] Group 12 FTA: 0.2400 +[2025-09-05 17:22:50] [Rank 0] Group 12 FTA: 0.2400 +[2025-09-05 17:22:50] [Rank 0] Group 13 FTA: 0.1900 +[2025-09-05 17:22:50] [Rank 0] Group 13 FTA: 0.1900 +[2025-09-05 17:22:50] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 17:22:50] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 17:22:50] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 17:22:50] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 17:22:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:22:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-09-05 17:22:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:22:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-09-05 17:22:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:22:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png +[2025-09-05 17:22:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:22:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png +[2025-09-05 17:22:52] [Rank 0] step:4001/10000 train_time:165642ms step_avg:41.40ms +[2025-09-05 17:22:52] [Rank 0] step:4001/10000 train_time:165642ms step_avg:41.40ms +[2025-09-05 17:22:52] [Rank 0] step:4021/10000 train_time:166183ms step_avg:41.33ms +[2025-09-05 17:22:52] [Rank 0] step:4021/10000 train_time:166183ms step_avg:41.33ms +[2025-09-05 17:22:53] [Rank 0] step:4041/10000 train_time:166841ms step_avg:41.29ms +[2025-09-05 17:22:53] [Rank 0] step:4041/10000 train_time:166841ms step_avg:41.29ms +[2025-09-05 17:22:54] [Rank 0] step:4061/10000 train_time:167500ms step_avg:41.25ms +[2025-09-05 17:22:54] [Rank 0] step:4061/10000 train_time:167500ms step_avg:41.25ms +[2025-09-05 17:22:54] [Rank 0] step:4081/10000 train_time:168158ms step_avg:41.21ms +[2025-09-05 17:22:54] [Rank 0] step:4081/10000 train_time:168158ms step_avg:41.21ms +[2025-09-05 17:22:55] [Rank 0] step:4101/10000 train_time:168815ms step_avg:41.16ms 
+[2025-09-05 17:22:55] [Rank 0] step:4101/10000 train_time:168815ms step_avg:41.16ms +[2025-09-05 17:22:56] [Rank 0] step:4121/10000 train_time:169474ms step_avg:41.12ms +[2025-09-05 17:22:56] [Rank 0] step:4121/10000 train_time:169474ms step_avg:41.12ms +[2025-09-05 17:22:56] [Rank 0] step:4141/10000 train_time:170133ms step_avg:41.09ms +[2025-09-05 17:22:56] [Rank 0] step:4141/10000 train_time:170133ms step_avg:41.09ms +[2025-09-05 17:22:57] [Rank 0] step:4161/10000 train_time:170792ms step_avg:41.05ms +[2025-09-05 17:22:57] [Rank 0] step:4161/10000 train_time:170792ms step_avg:41.05ms +[2025-09-05 17:22:58] [Rank 0] step:4181/10000 train_time:171602ms step_avg:41.04ms +[2025-09-05 17:22:58] [Rank 0] step:4181/10000 train_time:171602ms step_avg:41.04ms +[2025-09-05 17:22:58] [Rank 0] step:4201/10000 train_time:172260ms step_avg:41.00ms +[2025-09-05 17:22:58] [Rank 0] step:4201/10000 train_time:172260ms step_avg:41.00ms +[2025-09-05 17:22:59] [Rank 0] step:4221/10000 train_time:172919ms step_avg:40.97ms +[2025-09-05 17:22:59] [Rank 0] step:4221/10000 train_time:172919ms step_avg:40.97ms +[2025-09-05 17:23:00] [Rank 0] step:4241/10000 train_time:173577ms step_avg:40.93ms +[2025-09-05 17:23:00] [Rank 0] step:4241/10000 train_time:173577ms step_avg:40.93ms +[2025-09-05 17:23:01] [Rank 0] step:4261/10000 train_time:174455ms step_avg:40.94ms +[2025-09-05 17:23:01] [Rank 0] step:4261/10000 train_time:174455ms step_avg:40.94ms +[2025-09-05 17:23:01] [Rank 0] step:4281/10000 train_time:175114ms step_avg:40.90ms +[2025-09-05 17:23:01] [Rank 0] step:4281/10000 train_time:175114ms step_avg:40.90ms +[2025-09-05 17:23:02] [Rank 0] step:4301/10000 train_time:175773ms step_avg:40.87ms +[2025-09-05 17:23:02] [Rank 0] step:4301/10000 train_time:175773ms step_avg:40.87ms +[2025-09-05 17:23:03] [Rank 0] step:4321/10000 train_time:176431ms step_avg:40.83ms +[2025-09-05 17:23:03] [Rank 0] step:4321/10000 train_time:176431ms step_avg:40.83ms +[2025-09-05 17:23:03] [Rank 0] step:4341/10000 train_time:177090ms step_avg:40.79ms +[2025-09-05 17:23:03] [Rank 0] step:4341/10000 train_time:177090ms step_avg:40.79ms +[2025-09-05 17:23:04] [Rank 0] step:4361/10000 train_time:177748ms step_avg:40.76ms +[2025-09-05 17:23:04] [Rank 0] step:4361/10000 train_time:177748ms step_avg:40.76ms +[2025-09-05 17:23:05] [Rank 0] step:4381/10000 train_time:178407ms step_avg:40.72ms +[2025-09-05 17:23:05] [Rank 0] step:4381/10000 train_time:178407ms step_avg:40.72ms +[2025-09-05 17:23:05] [Rank 0] step:4401/10000 train_time:179066ms step_avg:40.69ms +[2025-09-05 17:23:05] [Rank 0] step:4401/10000 train_time:179066ms step_avg:40.69ms +[2025-09-05 17:23:06] [Rank 0] step:4421/10000 train_time:179725ms step_avg:40.65ms +[2025-09-05 17:23:06] [Rank 0] step:4421/10000 train_time:179725ms step_avg:40.65ms +[2025-09-05 17:23:07] [Rank 0] step:4441/10000 train_time:180384ms step_avg:40.62ms +[2025-09-05 17:23:07] [Rank 0] step:4441/10000 train_time:180384ms step_avg:40.62ms +[2025-09-05 17:23:07] [Rank 0] step:4461/10000 train_time:181043ms step_avg:40.58ms +[2025-09-05 17:23:07] [Rank 0] step:4461/10000 train_time:181043ms step_avg:40.58ms +[2025-09-05 17:23:08] [Rank 0] step:4481/10000 train_time:181701ms step_avg:40.55ms +[2025-09-05 17:23:08] [Rank 0] step:4481/10000 train_time:181701ms step_avg:40.55ms +[2025-09-05 17:23:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 17:23:09] [Rank 0] PRINT: step:4500/10000 train_loss:0.8308 val_loss:0.8091 train_time:182593ms step_avg:40.58ms
+[2025-09-05 17:23:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:23:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:24:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:24:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:24:31] [Rank 0] Total Loss: 4.6300
+[2025-09-05 17:24:31] [Rank 0] Total FTA (Unweighted): 0.6994
+[2025-09-05 17:24:31] [Rank 0] Total FTA (Weighted): 0.6994
+[2025-09-05 17:24:31] [Rank 0] Group 0 Loss: 4.3990
+[2025-09-05 17:24:31] [Rank 0] Group 1 Loss: 4.2115
+[2025-09-05 17:24:31] [Rank 0] Group 2 Loss: 4.0222
+[2025-09-05 17:24:31] [Rank 0] Group 3 Loss: 4.4874
+[2025-09-05 17:24:31] [Rank 0] Group 4 Loss: 4.4883
+[2025-09-05 17:24:31] [Rank 0] Group 5 Loss: 4.4097
+[2025-09-05 17:24:31] [Rank 0] Group 6 Loss: 4.3798
+[2025-09-05 17:24:31] [Rank 0] Group 7 Loss: 4.4161
+[2025-09-05 17:24:31] [Rank 0] Group 8 Loss: 4.5604
+[2025-09-05 17:24:31] [Rank 0] Group 9 Loss: 4.6181
+[2025-09-05 17:24:31] [Rank 0] Group 10 Loss: 4.8577
+[2025-09-05 17:24:31] [Rank 0] Group 11 Loss: 4.8671
+[2025-09-05 17:24:31] [Rank 0] Group 12 Loss: 4.9309
+[2025-09-05 17:24:31] [Rank 0] Group 13 Loss: 5.1210
+[2025-09-05 17:24:31] [Rank 0] Group 14 Loss: 5.1113
+[2025-09-05 17:24:31] [Rank 0] Group 15 Loss: 5.1988
+[2025-09-05 17:24:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:24:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:24:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:24:31] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:24:31] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:24:31] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:24:31] [Rank 0] Group 6 FTA: 0.9800
+[2025-09-05 17:24:31] [Rank 0] Group 7 FTA: 0.7600
+[2025-09-05 17:24:31] [Rank 0] Group 8 FTA: 0.7700
+[2025-09-05 17:24:31] [Rank 0] Group 9 FTA: 0.7000
+[2025-09-05 17:24:31] [Rank 0] Group 10 FTA: 0.6800
+[2025-09-05 17:24:31] [Rank 0] Group 11 FTA: 0.5500
+[2025-09-05 17:24:31] [Rank 0] Group 12 FTA: 0.3500
+[2025-09-05 17:24:31] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 17:24:31] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 17:24:31] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:24:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:24:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:24:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:24:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:24:32] [Rank 0] step:4501/10000 train_time:182602ms step_avg:40.57ms
+[2025-09-05 17:24:33] [Rank 0] step:4521/10000 train_time:183036ms step_avg:40.49ms
+[2025-09-05 17:24:34] [Rank 0] step:4541/10000 train_time:183693ms step_avg:40.45ms
+[2025-09-05 17:24:34] [Rank 0] step:4561/10000 train_time:184351ms step_avg:40.42ms
+[2025-09-05 17:24:35] [Rank 0] step:4581/10000 train_time:185009ms step_avg:40.39ms
+[2025-09-05 17:24:36] [Rank 0] step:4601/10000 train_time:185665ms step_avg:40.35ms
+[2025-09-05 17:24:36] [Rank 0] step:4621/10000 train_time:186322ms step_avg:40.32ms
+[2025-09-05 17:24:37] [Rank 0] step:4641/10000 train_time:186979ms step_avg:40.29ms
+[2025-09-05 17:24:38] [Rank 0] step:4661/10000 train_time:187636ms step_avg:40.26ms
+[2025-09-05 17:24:38] [Rank 0] step:4681/10000 train_time:188294ms step_avg:40.23ms
+[2025-09-05 17:24:39] [Rank 0] step:4701/10000 train_time:188951ms step_avg:40.19ms
+[2025-09-05 17:24:40] [Rank 0] step:4721/10000 train_time:189608ms step_avg:40.16ms
+[2025-09-05 17:24:40] [Rank 0] step:4741/10000 train_time:190266ms step_avg:40.13ms
+[2025-09-05 17:24:41] [Rank 0] step:4761/10000 train_time:190924ms step_avg:40.10ms
+[2025-09-05 17:24:42] [Rank 0] step:4781/10000 train_time:191581ms step_avg:40.07ms
+[2025-09-05 17:24:42] [Rank 0] step:4801/10000 train_time:192238ms step_avg:40.04ms
+[2025-09-05 17:24:43] [Rank 0] step:4821/10000 train_time:192895ms step_avg:40.01ms
+[2025-09-05 17:24:44] [Rank 0] step:4841/10000 train_time:193860ms step_avg:40.05ms
+[2025-09-05 17:24:45] [Rank 0] step:4861/10000 train_time:194518ms step_avg:40.02ms
+[2025-09-05 17:24:45] [Rank 0] step:4881/10000 train_time:195175ms step_avg:39.99ms
+[2025-09-05 17:24:46] [Rank 0] step:4901/10000 train_time:195832ms step_avg:39.96ms
+[2025-09-05 17:24:47] [Rank 0] step:4921/10000 train_time:196489ms step_avg:39.93ms
+[2025-09-05 17:24:47] [Rank 0] step:4941/10000 train_time:197146ms step_avg:39.90ms
+[2025-09-05 17:24:48] [Rank 0] step:4961/10000 train_time:197803ms step_avg:39.87ms
+[2025-09-05 17:24:49] [Rank 0] step:4981/10000 train_time:198461ms step_avg:39.84ms
+[2025-09-05 17:24:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:24:50] [Rank 0] PRINT: step:5000/10000 train_loss:0.8049 val_loss:0.7857 train_time:199352ms step_avg:39.87ms
+[2025-09-05 17:24:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:24:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:26:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:26:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:26:12] [Rank 0] Total Loss: 4.8279
+[2025-09-05 17:26:12] [Rank 0] Total FTA (Unweighted): 0.7300
+[2025-09-05 17:26:12] [Rank 0] Total FTA (Weighted): 0.7300
+[2025-09-05 17:26:12] [Rank 0] Group 0 Loss: 4.5125
+[2025-09-05 17:26:12] [Rank 0] Group 1 Loss: 4.4065
+[2025-09-05 17:26:12] [Rank 0] Group 2 Loss: 4.3098
+[2025-09-05 17:26:12] [Rank 0] Group 3 Loss: 4.6896
+[2025-09-05 17:26:12] [Rank 0] Group 4 Loss: 4.6593
+[2025-09-05 17:26:12] [Rank 0] Group 5 Loss: 4.6709
+[2025-09-05 17:26:12] [Rank 0] Group 6 Loss: 4.5945
+[2025-09-05 17:26:12] [Rank 0] Group 7 Loss: 4.6676
+[2025-09-05 17:26:12] [Rank 0] Group 8 Loss: 4.7983
+[2025-09-05 17:26:12] [Rank 0] Group 9 Loss: 4.8179
+[2025-09-05 17:26:12] [Rank 0] Group 10 Loss: 5.0267
+[2025-09-05 17:26:12] [Rank 0] Group 11 Loss: 5.0604
+[2025-09-05 17:26:12] [Rank 0] Group 12 Loss: 5.1412
+[2025-09-05 17:26:12] [Rank 0] Group 13 Loss: 5.2658
+[2025-09-05 17:26:12] [Rank 0] Group 14 Loss: 5.2917
+[2025-09-05 17:26:12] [Rank 0] Group 15 Loss: 5.3335
+[2025-09-05 17:26:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:26:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:26:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:26:12] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:26:12] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:26:13] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:26:13] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:26:13] [Rank 0] Group 7 FTA: 0.9100
+[2025-09-05 17:26:13] [Rank 0] Group 8 FTA: 0.7900
+[2025-09-05 17:26:13] [Rank 0] Group 9 FTA: 0.7300
+[2025-09-05 17:26:13] [Rank 0] Group 10 FTA: 0.7300
+[2025-09-05 17:26:13] [Rank 0] Group 11 FTA: 0.6000
+[2025-09-05 17:26:13] [Rank 0] Group 12 FTA: 0.4500
+[2025-09-05 17:26:13] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-05 17:26:13] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:26:13] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 17:26:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:26:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:26:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:26:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:26:14] [Rank 0] step:5001/10000 train_time:199361ms step_avg:39.86ms
+[2025-09-05 17:26:15] [Rank 0] step:5021/10000 train_time:199797ms step_avg:39.79ms
+[2025-09-05 17:26:15] [Rank 0] step:5041/10000 train_time:200455ms step_avg:39.76ms
+[2025-09-05 17:26:16] [Rank 0] step:5061/10000 train_time:201113ms step_avg:39.74ms
+[2025-09-05 17:26:17] [Rank 0] step:5081/10000 train_time:201771ms step_avg:39.71ms
+[2025-09-05 17:26:17] [Rank 0] step:5101/10000 train_time:202428ms step_avg:39.68ms
+[2025-09-05 17:26:18] [Rank 0] step:5121/10000 train_time:203087ms step_avg:39.66ms
+[2025-09-05 17:26:19] [Rank 0] step:5141/10000 train_time:203745ms step_avg:39.63ms
+[2025-09-05 17:26:19] [Rank 0] step:5161/10000 train_time:204403ms step_avg:39.61ms
+[2025-09-05 17:26:20] [Rank 0] step:5181/10000 train_time:205061ms step_avg:39.58ms
+[2025-09-05 17:26:21] [Rank 0] step:5201/10000 train_time:205718ms step_avg:39.55ms
+[2025-09-05 17:26:21] [Rank 0] step:5221/10000 train_time:206377ms step_avg:39.53ms
+[2025-09-05 17:26:22] [Rank 0] step:5241/10000 train_time:207035ms step_avg:39.50ms
+[2025-09-05 17:26:23] [Rank 0] step:5261/10000 train_time:207692ms step_avg:39.48ms
+[2025-09-05 17:26:23] [Rank 0] step:5281/10000 train_time:208351ms step_avg:39.45ms
+[2025-09-05 17:26:24] [Rank 0] step:5301/10000 train_time:209011ms step_avg:39.43ms
+[2025-09-05 17:26:24] [Rank 0] step:5321/10000 train_time:209668ms step_avg:39.40ms
+[2025-09-05 17:26:25] [Rank 0] step:5341/10000 train_time:210326ms step_avg:39.38ms
+[2025-09-05 17:26:26] [Rank 0] step:5361/10000 train_time:210984ms step_avg:39.36ms
+[2025-09-05 17:26:26] [Rank 0] step:5381/10000 train_time:211643ms step_avg:39.33ms
+[2025-09-05 17:26:27] [Rank 0] step:5401/10000 train_time:212302ms step_avg:39.31ms
+[2025-09-05 17:26:28] [Rank 0] step:5421/10000 train_time:212961ms step_avg:39.28ms
+[2025-09-05 17:26:28] [Rank 0] step:5441/10000 train_time:213618ms step_avg:39.26ms
+[2025-09-05 17:26:29] [Rank 0] step:5461/10000 train_time:214277ms step_avg:39.24ms
+[2025-09-05 17:26:30] [Rank 0] step:5481/10000 train_time:214935ms step_avg:39.21ms
+[2025-09-05 17:26:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:26:31] [Rank 0] PRINT: step:5500/10000 train_loss:0.7838 val_loss:0.7676 train_time:215828ms step_avg:39.24ms
+[2025-09-05 17:26:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:26:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:27:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:27:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:27:53] [Rank 0] Total Loss: 4.8334
+[2025-09-05 17:27:53] [Rank 0] Total FTA (Unweighted): 0.7475
+[2025-09-05 17:27:53] [Rank 0] Total FTA (Weighted): 0.7475
+[2025-09-05 17:27:53] [Rank 0] Group 0 Loss: 4.5275
+[2025-09-05 17:27:53] [Rank 0] Group 1 Loss: 4.3893
+[2025-09-05 17:27:53] [Rank 0] Group 2 Loss: 4.2522
+[2025-09-05 17:27:53] [Rank 0] Group 3 Loss: 4.7157
+[2025-09-05 17:27:53] [Rank 0] Group 4 Loss: 4.7097
+[2025-09-05 17:27:53] [Rank 0] Group 5 Loss: 4.6924
+[2025-09-05 17:27:53] [Rank 0] Group 6 Loss: 4.6318
+[2025-09-05 17:27:53] [Rank 0] Group 7 Loss: 4.7328
+[2025-09-05 17:27:53] [Rank 0] Group 8 Loss: 4.7782
+[2025-09-05 17:27:53] [Rank 0] Group 9 Loss: 4.8123
+[2025-09-05 17:27:53] [Rank 0] Group 10 Loss: 5.0552
+[2025-09-05 17:27:53] [Rank 0] Group 11 Loss: 5.0877
+[2025-09-05 17:27:53] [Rank 0] Group 12 Loss: 5.0888
+[2025-09-05 17:27:53] [Rank 0] Group 13 Loss: 5.2398
+[2025-09-05 17:27:53] [Rank 0] Group 14 Loss: 5.2902
+[2025-09-05 17:27:53] [Rank 0] Group 15 Loss: 5.3300
+[2025-09-05 17:27:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:27:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:27:53] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:27:53] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:27:53] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:27:53] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:27:53] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:27:53] [Rank 0] Group 7 FTA: 0.9700
+[2025-09-05 17:27:53] [Rank 0] Group 8 FTA: 0.8200
+[2025-09-05 17:27:53] [Rank 0] Group 9 FTA: 0.7600
+[2025-09-05 17:27:53] [Rank 0] Group 10 FTA: 0.7600
+[2025-09-05 17:27:53] [Rank 0] Group 11 FTA: 0.6500
+[2025-09-05 17:27:53] [Rank 0] Group 12 FTA: 0.5200
+[2025-09-05 17:27:53] [Rank 0] Group 13 FTA: 0.2700
+[2025-09-05 17:27:53] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 17:27:53] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:27:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:27:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:27:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:27:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:27:54] [Rank 0] step:5501/10000 train_time:215836ms step_avg:39.24ms
+[2025-09-05 17:27:55] [Rank 0] step:5521/10000 train_time:216270ms step_avg:39.17ms
+[2025-09-05 17:27:56] [Rank 0] step:5541/10000 train_time:216925ms step_avg:39.15ms
+[2025-09-05 17:27:56] [Rank 0] step:5561/10000 train_time:217584ms step_avg:39.13ms
+[2025-09-05 17:27:57] [Rank 0] step:5581/10000 train_time:218241ms step_avg:39.10ms
+[2025-09-05 17:27:58] [Rank 0] step:5601/10000 train_time:218899ms step_avg:39.08ms
+[2025-09-05 17:27:58] [Rank 0] step:5621/10000 train_time:219556ms step_avg:39.06ms
+[2025-09-05 17:28:00] [Rank 0] step:5641/10000 train_time:220213ms step_avg:39.04ms
+[2025-09-05 17:28:00] [Rank 0] step:5661/10000 train_time:221352ms step_avg:39.10ms
+[2025-09-05 17:28:01] [Rank 0] step:5681/10000 train_time:222010ms step_avg:39.08ms
+[2025-09-05 17:28:02] [Rank 0] step:5701/10000 train_time:222675ms step_avg:39.06ms
+[2025-09-05 17:28:02] [Rank 0] step:5721/10000 train_time:223334ms step_avg:39.04ms
+[2025-09-05 17:28:03] [Rank 0] step:5741/10000 train_time:223992ms step_avg:39.02ms
+[2025-09-05 17:28:04] [Rank 0] step:5761/10000 train_time:224650ms step_avg:39.00ms
+[2025-09-05 17:28:04] [Rank 0] step:5781/10000 train_time:225308ms step_avg:38.97ms
+[2025-09-05 17:28:05] [Rank 0] step:5801/10000 train_time:225967ms step_avg:38.95ms
+[2025-09-05 17:28:06] [Rank 0] step:5821/10000 train_time:226627ms step_avg:38.93ms
+[2025-09-05 17:28:06] [Rank 0] step:5841/10000 train_time:227285ms step_avg:38.91ms
+[2025-09-05 17:28:07] [Rank 0] step:5861/10000 train_time:227942ms step_avg:38.89ms
+[2025-09-05 17:28:07] [Rank 0] step:5881/10000 train_time:228601ms step_avg:38.87ms
+[2025-09-05 17:28:08] [Rank 0] step:5901/10000 train_time:229258ms step_avg:38.85ms
+[2025-09-05 17:28:09] [Rank 0] step:5921/10000 train_time:229915ms step_avg:38.83ms
+[2025-09-05 17:28:09] [Rank 0] step:5941/10000 train_time:230572ms step_avg:38.81ms
+[2025-09-05 17:28:10] [Rank 0] step:5961/10000 train_time:231229ms step_avg:38.79ms
+[2025-09-05 17:28:11] [Rank 0] step:5981/10000 train_time:231887ms step_avg:38.77ms
+[2025-09-05 17:28:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:28:12] [Rank 0] PRINT: step:6000/10000 train_loss:0.7671 val_loss:0.7523 train_time:232778ms step_avg:38.80ms
+[2025-09-05 17:28:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:28:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:29:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:29:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:29:34] [Rank 0] Total Loss: 4.7506
+[2025-09-05 17:29:34] [Rank 0] Total FTA (Unweighted): 0.7719
+[2025-09-05 17:29:34] [Rank 0] Total FTA (Weighted): 0.7719
+[2025-09-05 17:29:34] [Rank 0] Group 0 Loss: 4.5621
+[2025-09-05 17:29:34] [Rank 0] Group 1 Loss: 4.3528
+[2025-09-05 17:29:34] [Rank 0] Group 2 Loss: 4.2780
+[2025-09-05 17:29:34] [Rank 0] Group 3 Loss: 4.6119
+[2025-09-05 17:29:34] [Rank 0] Group 4 Loss: 4.5760
+[2025-09-05 17:29:34] [Rank 0] Group 5 Loss: 4.6418
+[2025-09-05 17:29:34] [Rank 0] Group 6 Loss: 4.5174
+[2025-09-05 17:29:34] [Rank 0] Group 7 Loss: 4.6043
+[2025-09-05 17:29:34] [Rank 0] Group 8 Loss: 4.6870
+[2025-09-05 17:29:34] [Rank 0] Group 9 Loss: 4.7095
+[2025-09-05 17:29:34] [Rank 0] Group 10 Loss: 4.9400
+[2025-09-05 17:29:34] [Rank 0] Group 11 Loss: 4.9380
+[2025-09-05 17:29:34] [Rank 0] Group 12 Loss: 4.9512
+[2025-09-05 17:29:34] [Rank 0] Group 13 Loss: 5.1739
+[2025-09-05 17:29:34] [Rank 0] Group 14 Loss: 5.2039
+[2025-09-05 17:29:34] [Rank 0] Group 15 Loss: 5.2613
+[2025-09-05 17:29:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:29:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:29:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:29:34] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:29:34] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:29:34] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:29:34] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:29:34] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 17:29:34] [Rank 0] Group 8 FTA: 0.8600
+[2025-09-05 17:29:34] [Rank 0] Group 9 FTA: 0.7600
+[2025-09-05 17:29:34] [Rank 0] Group 10 FTA: 0.8000
+[2025-09-05 17:29:34] [Rank 0] Group 11 FTA: 0.7000
+[2025-09-05 17:29:34] [Rank 0] Group 12 FTA: 0.6500
+[2025-09-05 17:29:34] [Rank 0] Group 13 FTA: 0.3100
+[2025-09-05 17:29:34] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 17:29:34] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 17:29:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:29:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:29:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:29:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:29:35] [Rank 0] step:6001/10000 train_time:232786ms step_avg:38.79ms
+[2025-09-05 17:29:37] [Rank 0] step:6021/10000 train_time:233700ms step_avg:38.81ms
+[2025-09-05 17:29:37] [Rank 0] step:6041/10000 train_time:234359ms step_avg:38.79ms
+[2025-09-05 17:29:38] [Rank 0] step:6061/10000 train_time:235017ms step_avg:38.78ms
+[2025-09-05 17:29:39] [Rank 0] step:6081/10000 train_time:235675ms step_avg:38.76ms
+[2025-09-05 17:29:39] [Rank 0] step:6101/10000 train_time:236333ms step_avg:38.74ms
+[2025-09-05 17:29:40] [Rank 0] step:6121/10000 train_time:236992ms step_avg:38.72ms
+[2025-09-05 17:29:41] [Rank 0] step:6141/10000 train_time:237650ms step_avg:38.70ms
+[2025-09-05 17:29:41] [Rank 0] step:6161/10000 train_time:238309ms step_avg:38.68ms
+[2025-09-05 17:29:42] [Rank 0] step:6181/10000 train_time:238972ms step_avg:38.66ms
+[2025-09-05 17:29:43] [Rank 0] step:6201/10000 train_time:239631ms step_avg:38.64ms
+[2025-09-05 17:29:43] [Rank 0] step:6221/10000 train_time:240289ms step_avg:38.63ms
+[2025-09-05 17:29:44] [Rank 0] step:6241/10000 train_time:240947ms step_avg:38.61ms
+[2025-09-05 17:29:45] [Rank 0] step:6261/10000 train_time:241606ms step_avg:38.59ms
+[2025-09-05 17:29:45] [Rank 0] step:6281/10000 train_time:242264ms step_avg:38.57ms
+[2025-09-05 17:29:46] [Rank 0] step:6301/10000 train_time:242923ms step_avg:38.55ms
+[2025-09-05 17:29:46] [Rank 0] step:6321/10000 train_time:243582ms step_avg:38.54ms
+[2025-09-05 17:29:47] [Rank 0] step:6341/10000 train_time:244241ms step_avg:38.52ms
+[2025-09-05 17:29:48] [Rank 0] step:6361/10000 train_time:244899ms step_avg:38.50ms
+[2025-09-05 17:29:48] [Rank 0] step:6381/10000 train_time:245565ms step_avg:38.48ms
+[2025-09-05 17:29:49] [Rank 0] step:6401/10000 train_time:246224ms step_avg:38.47ms
+[2025-09-05 17:29:50] [Rank 0] step:6421/10000 train_time:246882ms step_avg:38.45ms
+[2025-09-05 17:29:50] [Rank 0] step:6441/10000 train_time:247540ms step_avg:38.43ms
+[2025-09-05 17:29:51] [Rank 0] step:6461/10000 train_time:248199ms step_avg:38.41ms
+[2025-09-05 17:29:52] [Rank 0] step:6481/10000 train_time:248858ms step_avg:38.40ms
+[2025-09-05 17:29:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:29:53] [Rank 0] PRINT: step:6500/10000 train_loss:0.7526 val_loss:0.7384 train_time:249750ms step_avg:38.42ms
+[2025-09-05 17:29:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:29:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:31:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:31:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:31:16] [Rank 0] Total Loss: 4.9242
+[2025-09-05 17:31:16] [Rank 0] Total FTA (Unweighted): 0.7819
+[2025-09-05 17:31:16] [Rank 0] Total FTA (Weighted): 0.7819
+[2025-09-05 17:31:16] [Rank 0] Group 0 Loss: 4.5679
+[2025-09-05 17:31:16] [Rank 0] Group 1 Loss: 4.5721
+[2025-09-05 17:31:16] [Rank 0] Group 2 Loss: 4.4277
+[2025-09-05 17:31:16] [Rank 0] Group 3 Loss: 4.8858
+[2025-09-05 17:31:16] [Rank 0] Group 4 Loss: 4.8214
+[2025-09-05 17:31:16] [Rank 0] Group 5 Loss: 4.8114
+[2025-09-05 17:31:16] [Rank 0] Group 6 Loss: 4.7443
+[2025-09-05 17:31:16] [Rank 0] Group 7 Loss: 4.8102
+[2025-09-05 17:31:16] [Rank 0] Group 8 Loss: 4.9216
+[2025-09-05 17:31:16] [Rank 0] Group 9 Loss: 4.8960
+[2025-09-05 17:31:16] [Rank 0] Group 10 Loss: 5.1441
+[2025-09-05 17:31:16] [Rank 0] Group 11 Loss: 5.1137
+[2025-09-05 17:31:16] [Rank 0] Group 12 Loss: 5.1392
+[2025-09-05 17:31:16] [Rank 0] Group 13 Loss: 5.2720
+[2025-09-05 17:31:16] [Rank 0] Group 14 Loss: 5.3025
+[2025-09-05 17:31:16] [Rank 0] Group 15 Loss: 5.3574
+[2025-09-05 17:31:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:31:16] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:31:16] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:31:16] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:31:16] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:31:16] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:31:16] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:31:16] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:31:16] [Rank 0] Group 8 FTA: 0.9000
+[2025-09-05 17:31:16] [Rank 0] Group 9 FTA: 0.7500
+[2025-09-05 17:31:16] [Rank 0] Group 10 FTA: 0.7900
+[2025-09-05 17:31:16] [Rank 0] Group 11 FTA: 0.7600
+[2025-09-05 17:31:16] [Rank 0] Group 12 FTA: 0.6900
+[2025-09-05 17:31:16] [Rank 0] Group 13 FTA: 0.3500
+[2025-09-05 17:31:16] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 17:31:16] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 17:31:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:31:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:31:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:31:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:31:17] [Rank 0] step:6501/10000 train_time:249758ms step_avg:38.42ms
+[2025-09-05 17:31:18] [Rank 0] step:6521/10000 train_time:250193ms step_avg:38.37ms
+[2025-09-05 17:31:19] [Rank 0] step:6541/10000 train_time:250851ms step_avg:38.35ms
+[2025-09-05 17:31:19] [Rank 0] step:6561/10000 train_time:251508ms step_avg:38.33ms
+[2025-09-05 17:31:20] [Rank 0] step:6581/10000 train_time:252165ms step_avg:38.32ms
+[2025-09-05 17:31:21] [Rank 0] step:6601/10000 train_time:252823ms step_avg:38.30ms
+[2025-09-05 17:31:21] [Rank 0] step:6621/10000 train_time:253480ms step_avg:38.28ms
+[2025-09-05 17:31:22] [Rank 0] step:6641/10000 train_time:254137ms step_avg:38.27ms
+[2025-09-05 17:31:23] [Rank 0] step:6661/10000 train_time:254795ms step_avg:38.25ms
+[2025-09-05 17:31:23] [Rank 0] step:6681/10000 train_time:255612ms step_avg:38.26ms
+[2025-09-05 17:31:24] [Rank 0] step:6701/10000 train_time:256269ms step_avg:38.24ms
+[2025-09-05 17:31:25] [Rank 0] step:6721/10000 train_time:256927ms step_avg:38.23ms
+[2025-09-05 17:31:25] [Rank 0] step:6741/10000 train_time:257585ms step_avg:38.21ms
+[2025-09-05 17:31:26] [Rank 0] step:6761/10000 train_time:258413ms step_avg:38.22ms
+[2025-09-05 17:31:27] [Rank 0] step:6781/10000 train_time:259069ms step_avg:38.21ms
+[2025-09-05 17:31:28] [Rank 0] step:6801/10000 train_time:259726ms step_avg:38.19ms
+[2025-09-05 17:31:28] [Rank 0] step:6821/10000 train_time:260384ms step_avg:38.17ms
+[2025-09-05 17:31:29] [Rank 0] step:6841/10000 train_time:261243ms step_avg:38.19ms
+[2025-09-05 17:31:30] [Rank 0] step:6861/10000 train_time:261900ms step_avg:38.17ms
+[2025-09-05 17:31:30] [Rank 0] step:6881/10000 train_time:262558ms step_avg:38.16ms
+[2025-09-05 17:31:31] [Rank 0] step:6901/10000 train_time:263216ms step_avg:38.14ms
+[2025-09-05 17:31:32] [Rank 0] step:6921/10000 train_time:263874ms step_avg:38.13ms
+[2025-09-05 17:31:32] [Rank 0] step:6941/10000 train_time:264531ms step_avg:38.11ms
+[2025-09-05 17:31:33] [Rank 0] step:6961/10000 train_time:265189ms step_avg:38.10ms
+[2025-09-05 17:31:34] [Rank 0] step:6981/10000 train_time:265847ms step_avg:38.08ms
+[2025-09-05 17:31:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:31:35] [Rank 0] PRINT: step:7000/10000 train_loss:0.7403 val_loss:0.7277 train_time:266739ms step_avg:38.11ms
+[2025-09-05 17:31:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:31:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:32:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:32:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:32:57] [Rank 0] Total Loss: 4.8565
+[2025-09-05 17:32:57] [Rank 0] Total FTA (Unweighted): 0.7956
+[2025-09-05 17:32:57] [Rank 0] Total FTA (Weighted): 0.7956
+[2025-09-05 17:32:57] [Rank 0] Group 0 Loss: 4.6305
+[2025-09-05 17:32:57] [Rank 0] Group 1 Loss: 4.5157
+[2025-09-05 17:32:57] [Rank 0] Group 2 Loss: 4.3926
+[2025-09-05 17:32:57] [Rank 0] Group 3 Loss: 4.8124
+[2025-09-05 17:32:57] [Rank 0] Group 4 Loss: 4.8418
+[2025-09-05 17:32:57] [Rank 0] Group 5 Loss: 4.7079
+[2025-09-05 17:32:57] [Rank 0] Group 6 Loss: 4.6319
+[2025-09-05 17:32:57] [Rank 0] Group 7 Loss: 4.7258
+[2025-09-05 17:32:57] [Rank 0] Group 8 Loss: 4.8259
+[2025-09-05 17:32:57] [Rank 0] Group 9 Loss: 4.8414
+[2025-09-05 17:32:57] [Rank 0] Group 10 Loss: 4.9744
+[2025-09-05 17:32:57] [Rank 0] Group 11 Loss: 5.0594
+[2025-09-05 17:32:57] [Rank 0] Group 12 Loss: 5.0525
+[2025-09-05 17:32:57] [Rank 0] Group 13 Loss: 5.1913
+[2025-09-05 17:32:57] [Rank 0] Group 14 Loss: 5.2171
+[2025-09-05 17:32:57] [Rank 0] Group 15 Loss: 5.2826
+[2025-09-05 17:32:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:32:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:32:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:32:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:32:57] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:32:57] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:32:57] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:32:57] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:32:57] [Rank 0] Group 8 FTA: 0.8800
+[2025-09-05 17:32:57] [Rank 0] Group 9 FTA: 0.7800
+[2025-09-05 17:32:57] [Rank 0] Group 10 FTA: 0.8500
+[2025-09-05 17:32:57] [Rank 0] Group 11 FTA: 0.8000
+[2025-09-05 17:32:57] [Rank 0] Group 12 FTA: 0.7400
+[2025-09-05 17:32:57] [Rank 0] Group 13 FTA: 0.3800
+[2025-09-05 17:32:57] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 17:32:57] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 17:32:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:32:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:32:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:32:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:32:58] [Rank 0] step:7001/10000 train_time:266748ms step_avg:38.10ms
+[2025-09-05 17:32:59] [Rank 0] step:7021/10000 train_time:267199ms step_avg:38.06ms
+[2025-09-05 17:33:00] [Rank 0] step:7041/10000 train_time:267857ms step_avg:38.04ms
+[2025-09-05 17:33:00] [Rank 0] step:7061/10000 train_time:268516ms step_avg:38.03ms
+[2025-09-05 17:33:01] [Rank 0] step:7081/10000 train_time:269175ms step_avg:38.01ms
+[2025-09-05 17:33:02] [Rank 0] step:7101/10000 train_time:269833ms step_avg:38.00ms
+[2025-09-05 17:33:02] [Rank 0] step:7101/10000 train_time:269833ms step_avg:38.00ms +[2025-09-05 17:33:02] [Rank 0] step:7121/10000 train_time:270492ms step_avg:37.99ms +[2025-09-05 17:33:02] [Rank 0] step:7121/10000 train_time:270492ms step_avg:37.99ms +[2025-09-05 17:33:03] [Rank 0] step:7141/10000 train_time:271150ms step_avg:37.97ms +[2025-09-05 17:33:03] [Rank 0] step:7141/10000 train_time:271150ms step_avg:37.97ms +[2025-09-05 17:33:04] [Rank 0] step:7161/10000 train_time:271809ms step_avg:37.96ms +[2025-09-05 17:33:04] [Rank 0] step:7161/10000 train_time:271809ms step_avg:37.96ms +[2025-09-05 17:33:04] [Rank 0] step:7181/10000 train_time:272468ms step_avg:37.94ms +[2025-09-05 17:33:04] [Rank 0] step:7181/10000 train_time:272468ms step_avg:37.94ms +[2025-09-05 17:33:05] [Rank 0] step:7201/10000 train_time:273126ms step_avg:37.93ms +[2025-09-05 17:33:05] [Rank 0] step:7201/10000 train_time:273126ms step_avg:37.93ms +[2025-09-05 17:33:06] [Rank 0] step:7221/10000 train_time:273785ms step_avg:37.92ms +[2025-09-05 17:33:06] [Rank 0] step:7221/10000 train_time:273785ms step_avg:37.92ms +[2025-09-05 17:33:06] [Rank 0] step:7241/10000 train_time:274443ms step_avg:37.90ms +[2025-09-05 17:33:06] [Rank 0] step:7241/10000 train_time:274443ms step_avg:37.90ms +[2025-09-05 17:33:07] [Rank 0] step:7261/10000 train_time:275101ms step_avg:37.89ms +[2025-09-05 17:33:07] [Rank 0] step:7261/10000 train_time:275101ms step_avg:37.89ms +[2025-09-05 17:33:08] [Rank 0] step:7281/10000 train_time:275760ms step_avg:37.87ms +[2025-09-05 17:33:08] [Rank 0] step:7281/10000 train_time:275760ms step_avg:37.87ms +[2025-09-05 17:33:08] [Rank 0] step:7301/10000 train_time:276418ms step_avg:37.86ms +[2025-09-05 17:33:08] [Rank 0] step:7301/10000 train_time:276418ms step_avg:37.86ms +[2025-09-05 17:33:09] [Rank 0] step:7321/10000 train_time:277077ms step_avg:37.85ms +[2025-09-05 17:33:09] [Rank 0] step:7321/10000 train_time:277077ms step_avg:37.85ms +[2025-09-05 17:33:10] [Rank 0] step:7341/10000 train_time:277735ms step_avg:37.83ms +[2025-09-05 17:33:10] [Rank 0] step:7341/10000 train_time:277735ms step_avg:37.83ms +[2025-09-05 17:33:10] [Rank 0] step:7361/10000 train_time:278393ms step_avg:37.82ms +[2025-09-05 17:33:10] [Rank 0] step:7361/10000 train_time:278393ms step_avg:37.82ms +[2025-09-05 17:33:11] [Rank 0] step:7381/10000 train_time:279051ms step_avg:37.81ms +[2025-09-05 17:33:11] [Rank 0] step:7381/10000 train_time:279051ms step_avg:37.81ms +[2025-09-05 17:33:12] [Rank 0] step:7401/10000 train_time:279710ms step_avg:37.79ms +[2025-09-05 17:33:12] [Rank 0] step:7401/10000 train_time:279710ms step_avg:37.79ms +[2025-09-05 17:33:12] [Rank 0] step:7421/10000 train_time:280369ms step_avg:37.78ms +[2025-09-05 17:33:12] [Rank 0] step:7421/10000 train_time:280369ms step_avg:37.78ms +[2025-09-05 17:33:13] [Rank 0] step:7441/10000 train_time:281028ms step_avg:37.77ms +[2025-09-05 17:33:13] [Rank 0] step:7441/10000 train_time:281028ms step_avg:37.77ms +[2025-09-05 17:33:14] [Rank 0] step:7461/10000 train_time:281686ms step_avg:37.75ms +[2025-09-05 17:33:14] [Rank 0] step:7461/10000 train_time:281686ms step_avg:37.75ms +[2025-09-05 17:33:14] [Rank 0] step:7481/10000 train_time:282344ms step_avg:37.74ms +[2025-09-05 17:33:14] [Rank 0] step:7481/10000 train_time:282344ms step_avg:37.74ms +[2025-09-05 17:33:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 17:33:15] [Rank 0] PRINT: step:7500/10000 train_loss:0.7293 val_loss:0.7174 train_time:283237ms step_avg:37.76ms
+[2025-09-05 17:33:15] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:33:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:34:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:34:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:34:38] [Rank 0] Total Loss: 4.9654
+[2025-09-05 17:34:38] [Rank 0] Total FTA (Unweighted): 0.8031
+[2025-09-05 17:34:38] [Rank 0] Total FTA (Weighted): 0.8031
+[2025-09-05 17:34:38] [Rank 0] Group 0 Loss: 4.9003
+[2025-09-05 17:34:38] [Rank 0] Group 1 Loss: 4.5467
+[2025-09-05 17:34:38] [Rank 0] Group 2 Loss: 4.4357
+[2025-09-05 17:34:38] [Rank 0] Group 3 Loss: 4.9233
+[2025-09-05 17:34:38] [Rank 0] Group 4 Loss: 4.8913
+[2025-09-05 17:34:38] [Rank 0] Group 5 Loss: 4.8834
+[2025-09-05 17:34:38] [Rank 0] Group 6 Loss: 4.7465
+[2025-09-05 17:34:38] [Rank 0] Group 7 Loss: 4.8491
+[2025-09-05 17:34:38] [Rank 0] Group 8 Loss: 4.9451
+[2025-09-05 17:34:38] [Rank 0] Group 9 Loss: 4.9666
+[2025-09-05 17:34:38] [Rank 0] Group 10 Loss: 5.1327
+[2025-09-05 17:34:38] [Rank 0] Group 11 Loss: 5.1352
+[2025-09-05 17:34:38] [Rank 0] Group 12 Loss: 5.1528
+[2025-09-05 17:34:38] [Rank 0] Group 13 Loss: 5.2870
+[2025-09-05 17:34:38] [Rank 0] Group 14 Loss: 5.2985
+[2025-09-05 17:34:38] [Rank 0] Group 15 Loss: 5.3522
+[2025-09-05 17:34:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:34:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:34:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:34:38] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:34:38] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:34:38] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:34:38] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:34:38] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:34:38] [Rank 0] Group 8 FTA: 0.9200
+[2025-09-05 17:34:38] [Rank 0] Group 9 FTA: 0.8000
+[2025-09-05 17:34:38] [Rank 0] Group 10 FTA: 0.8700
+[2025-09-05 17:34:38] [Rank 0] Group 11 FTA: 0.8300
+[2025-09-05 17:34:38] [Rank 0] Group 12 FTA: 0.7700
+[2025-09-05 17:34:38] [Rank 0] Group 13 FTA: 0.4400
+[2025-09-05 17:34:38] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 17:34:38] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 17:34:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:34:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:34:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:34:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:34:39] [Rank 0] step:7501/10000 train_time:283245ms step_avg:37.76ms
+[2025-09-05 17:34:40] [Rank 0] step:7521/10000 train_time:283689ms step_avg:37.72ms
+[2025-09-05 17:34:41] [Rank 0] step:7541/10000 train_time:284347ms step_avg:37.71ms
+[2025-09-05 17:34:41] [Rank 0] step:7561/10000 train_time:285004ms step_avg:37.69ms
+[2025-09-05 17:34:42] [Rank 0] step:7581/10000 train_time:285661ms step_avg:37.68ms
+[2025-09-05 17:34:43] [Rank 0] step:7601/10000 train_time:286319ms step_avg:37.67ms
+[2025-09-05 17:34:43] [Rank 0] step:7621/10000 train_time:286976ms step_avg:37.66ms
+[2025-09-05 17:34:45] [Rank 0] step:7641/10000 train_time:287866ms step_avg:37.67ms
+[2025-09-05 17:34:45] [Rank 0] step:7661/10000 train_time:288761ms step_avg:37.69ms
+[2025-09-05 17:34:46] [Rank 0] step:7681/10000 train_time:289418ms step_avg:37.68ms
+[2025-09-05 17:34:47] [Rank 0] step:7701/10000 train_time:290074ms step_avg:37.67ms
+[2025-09-05 17:34:47] [Rank 0] step:7721/10000 train_time:290731ms step_avg:37.65ms
+[2025-09-05 17:34:48] [Rank 0] step:7741/10000 train_time:291388ms step_avg:37.64ms
+[2025-09-05 17:34:48] [Rank 0] step:7761/10000 train_time:292046ms step_avg:37.63ms
+[2025-09-05 17:34:49] [Rank 0] step:7781/10000 train_time:292705ms step_avg:37.62ms
+[2025-09-05 17:34:50] [Rank 0] step:7801/10000 train_time:293363ms step_avg:37.61ms
+[2025-09-05 17:34:50] [Rank 0] step:7821/10000 train_time:294021ms step_avg:37.59ms
+[2025-09-05 17:34:51] [Rank 0] step:7841/10000 train_time:294679ms step_avg:37.58ms
+[2025-09-05 17:34:52] [Rank 0] step:7861/10000 train_time:295336ms step_avg:37.57ms
+[2025-09-05 17:34:52] [Rank 0] step:7881/10000 train_time:295994ms step_avg:37.56ms
+[2025-09-05 17:34:53] [Rank 0] step:7901/10000 train_time:296650ms step_avg:37.55ms
+[2025-09-05 17:34:54] [Rank 0] step:7921/10000 train_time:297309ms step_avg:37.53ms
+[2025-09-05 17:34:54] [Rank 0] step:7941/10000 train_time:297966ms step_avg:37.52ms
+[2025-09-05 17:34:55] [Rank 0] step:7961/10000 train_time:298624ms step_avg:37.51ms
+[2025-09-05 17:34:56] [Rank 0] step:7981/10000 train_time:299282ms step_avg:37.50ms
+[2025-09-05 17:34:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:34:57] [Rank 0] PRINT: step:8000/10000 train_loss:0.7201 val_loss:0.7089 train_time:300174ms step_avg:37.52ms
+[2025-09-05 17:34:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:34:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:36:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:36:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:36:19] [Rank 0] Total Loss: 5.0219
+[2025-09-05 17:36:19] [Rank 0] Total FTA (Unweighted): 0.8225
+[2025-09-05 17:36:19] [Rank 0] Total FTA (Weighted): 0.8225
+[2025-09-05 17:36:19] [Rank 0] Group 0 Loss: 4.7922
+[2025-09-05 17:36:19] [Rank 0] Group 1 Loss: 4.7167
+[2025-09-05 17:36:19] [Rank 0] Group 2 Loss: 4.4755
+[2025-09-05 17:36:19] [Rank 0] Group 3 Loss: 4.9646
+[2025-09-05 17:36:19] [Rank 0] Group 4 Loss: 4.9414
+[2025-09-05 17:36:19] [Rank 0] Group 5 Loss: 4.9604
+[2025-09-05 17:36:19] [Rank 0] Group 6 Loss: 4.8157
+[2025-09-05 17:36:19] [Rank 0] Group 7 Loss: 4.9076
+[2025-09-05 17:36:19] [Rank 0] Group 8 Loss: 4.9997
+[2025-09-05 17:36:19] [Rank 0] Group 9 Loss: 5.0054
+[2025-09-05 17:36:19] [Rank 0] Group 10 Loss: 5.2148
+[2025-09-05 17:36:19] [Rank 0] Group 11 Loss: 5.2059
+[2025-09-05 17:36:19] [Rank 0] Group 12 Loss: 5.2007
+[2025-09-05 17:36:19] [Rank 0] Group 13 Loss: 5.3625
+[2025-09-05 17:36:19] [Rank 0] Group 14 Loss: 5.3548
+[2025-09-05 17:36:19] [Rank 0] Group 15 Loss: 5.4329
+[2025-09-05 17:36:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:36:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:36:19] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:36:19] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:36:19] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:36:19] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:36:19] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:36:19] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:36:19] [Rank 0] Group 8 FTA: 0.9200
+[2025-09-05 17:36:19] [Rank 0] Group 9 FTA: 0.8100
+[2025-09-05 17:36:19] [Rank 0] Group 10 FTA: 0.9000
+[2025-09-05 17:36:19] [Rank 0] Group 11 FTA: 0.8400
+[2025-09-05 17:36:19] [Rank 0] Group 12 FTA: 0.8000
+[2025-09-05 17:36:19] [Rank 0] Group 13 FTA: 0.5400
+[2025-09-05 17:36:19] [Rank 0] Group 14 FTA: 0.2200
+[2025-09-05 17:36:19] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 17:36:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:36:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:36:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:36:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:36:21] [Rank 0] step:8001/10000 train_time:300183ms step_avg:37.52ms
+[2025-09-05 17:36:22] [Rank 0] step:8021/10000 train_time:300626ms step_avg:37.48ms
+[2025-09-05 17:36:22] [Rank 0] step:8041/10000 train_time:301755ms step_avg:37.53ms
+[2025-09-05 17:36:23] [Rank 0] step:8061/10000 train_time:302413ms step_avg:37.52ms
+[2025-09-05 17:36:24] [Rank 0] step:8081/10000 train_time:303071ms step_avg:37.50ms
+[2025-09-05 17:36:24] [Rank 0] step:8101/10000 train_time:303730ms step_avg:37.49ms
+[2025-09-05 17:36:25] [Rank 0] step:8121/10000 train_time:304389ms step_avg:37.48ms
+[2025-09-05 17:36:26] [Rank 0] step:8141/10000 train_time:305046ms step_avg:37.47ms
+[2025-09-05 17:36:26] [Rank 0] step:8161/10000 train_time:305706ms step_avg:37.46ms
+[2025-09-05 17:36:27] [Rank 0] step:8181/10000 train_time:306363ms step_avg:37.45ms
+[2025-09-05 17:36:28] [Rank 0] step:8201/10000 train_time:307022ms step_avg:37.44ms
+[2025-09-05 17:36:28] [Rank 0] step:8221/10000 train_time:307680ms step_avg:37.43ms
+[2025-09-05 17:36:29] [Rank 0] step:8241/10000 train_time:308338ms step_avg:37.42ms
+[2025-09-05 17:36:30] [Rank 0] step:8261/10000 train_time:308998ms step_avg:37.40ms
+[2025-09-05 17:36:30] [Rank 0] step:8281/10000 train_time:309657ms step_avg:37.39ms
+[2025-09-05 17:36:31] [Rank 0] step:8301/10000 train_time:310317ms step_avg:37.38ms
+[2025-09-05 17:36:32] [Rank 0] step:8321/10000 train_time:310973ms step_avg:37.37ms
+[2025-09-05 17:36:32] [Rank 0] step:8341/10000 train_time:311631ms step_avg:37.36ms
+[2025-09-05 17:36:33] [Rank 0] step:8361/10000 train_time:312290ms step_avg:37.35ms
+[2025-09-05 17:36:34] [Rank 0] step:8381/10000 train_time:312949ms step_avg:37.34ms
+[2025-09-05 17:36:34] [Rank 0] step:8401/10000 train_time:313607ms step_avg:37.33ms
+[2025-09-05 17:36:35] [Rank 0] step:8421/10000 train_time:314266ms step_avg:37.32ms
+[2025-09-05 17:36:36] [Rank 0] step:8441/10000 train_time:314925ms step_avg:37.31ms
+[2025-09-05 17:36:36] [Rank 0] step:8461/10000 train_time:315583ms step_avg:37.30ms
+[2025-09-05 17:36:37] [Rank 0] step:8481/10000 train_time:316242ms step_avg:37.29ms
+[2025-09-05 17:36:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:36:38] [Rank 0] PRINT: step:8500/10000 train_loss:0.7118 val_loss:0.7010 train_time:317134ms step_avg:37.31ms
+[2025-09-05 17:36:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:36:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:38:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:38:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:38:01] [Rank 0] Total Loss: 4.9582
+[2025-09-05 17:38:01] [Rank 0] Total FTA (Unweighted): 0.8281
+[2025-09-05 17:38:01] [Rank 0] Total FTA (Weighted): 0.8281
+[2025-09-05 17:38:01] [Rank 0] Group 0 Loss: 4.6479
+[2025-09-05 17:38:01] [Rank 0] Group 1 Loss: 4.6079
+[2025-09-05 17:38:01] [Rank 0] Group 2 Loss: 4.4864
+[2025-09-05 17:38:01] [Rank 0] Group 3 Loss: 4.9063
+[2025-09-05 17:38:01] [Rank 0] Group 4 Loss: 4.8999
+[2025-09-05 17:38:01] [Rank 0] Group 5 Loss: 4.8881
+[2025-09-05 17:38:01] [Rank 0] Group 6 Loss: 4.7931
+[2025-09-05 17:38:01] [Rank 0] Group 7 Loss: 4.8514
+[2025-09-05 17:38:01] [Rank 0] Group 8 Loss: 4.9617
+[2025-09-05 17:38:01] [Rank 0] Group 9 Loss: 4.9803
+[2025-09-05 17:38:01] [Rank 0] Group 10 Loss: 5.1490
+[2025-09-05 17:38:01] [Rank 0] Group 11 Loss: 5.1381
+[2025-09-05 17:38:01] [Rank 0] Group 12 Loss: 5.1354
+[2025-09-05 17:38:01] [Rank 0] Group 13 Loss: 5.2797
+[2025-09-05 17:38:01] [Rank 0] Group 14 Loss: 5.2758
+[2025-09-05 17:38:01] [Rank 0] Group 15 Loss: 5.3311
+[2025-09-05 17:38:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:38:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:38:01] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:38:01] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:38:01] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:38:01] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:38:01] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:38:01] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:38:01] [Rank 0] Group 8 FTA: 0.9400
+[2025-09-05 17:38:01] [Rank 0] Group 9 FTA: 0.8100
+[2025-09-05 17:38:01] [Rank 0] Group 10 FTA: 0.9000
+[2025-09-05 17:38:01] [Rank 0] Group 11 FTA: 0.8400
+[2025-09-05 17:38:01] [Rank 0] Group 12 FTA: 0.8600
+[2025-09-05 17:38:01] [Rank 0] Group 13 FTA: 0.5600
+[2025-09-05 17:38:01] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 17:38:01] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 17:38:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:38:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:38:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:38:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:38:03] [Rank 0] step:8501/10000 train_time:317143ms step_avg:37.31ms
+[2025-09-05 17:38:03] [Rank 0] step:8521/10000 train_time:317582ms step_avg:37.27ms
+[2025-09-05 17:38:04] [Rank 0] step:8541/10000 train_time:318239ms step_avg:37.26ms
+[2025-09-05 17:38:05] [Rank 0] step:8561/10000 train_time:318898ms step_avg:37.25ms
+[2025-09-05 17:38:05] [Rank 0] step:8581/10000 train_time:319555ms step_avg:37.24ms
+[2025-09-05 17:38:06] [Rank 0] step:8601/10000 train_time:320213ms step_avg:37.23ms
+[2025-09-05 17:38:07] [Rank 0] step:8621/10000 train_time:320871ms step_avg:37.22ms
+[2025-09-05 17:38:07] [Rank 0] step:8641/10000 train_time:321529ms step_avg:37.21ms
+[2025-09-05 17:38:08] [Rank 0] step:8661/10000 train_time:322187ms step_avg:37.20ms
+[2025-09-05 17:38:08] [Rank 0] step:8681/10000 train_time:322844ms step_avg:37.19ms
+[2025-09-05 17:38:09] [Rank 0] step:8701/10000 train_time:323502ms step_avg:37.18ms
+[2025-09-05 17:38:10] [Rank 0] step:8721/10000 train_time:324160ms step_avg:37.17ms
+[2025-09-05 17:38:10] [Rank 0] step:8741/10000 train_time:324817ms step_avg:37.16ms
+[2025-09-05 17:38:11] [Rank 0] step:8761/10000 train_time:325474ms step_avg:37.15ms
+[2025-09-05 17:38:12] [Rank 0] step:8781/10000 train_time:326133ms step_avg:37.14ms
+[2025-09-05 17:38:12] [Rank 0] step:8801/10000 train_time:326791ms step_avg:37.13ms
+[2025-09-05 17:38:13] [Rank 0] step:8821/10000 train_time:327448ms step_avg:37.12ms
+[2025-09-05 17:38:14] [Rank 0] step:8841/10000 train_time:328106ms step_avg:37.11ms
+[2025-09-05 17:38:14] [Rank 0] step:8861/10000 train_time:328764ms step_avg:37.10ms
+[2025-09-05 17:38:15] [Rank 0] step:8881/10000 train_time:329422ms step_avg:37.09ms
+[2025-09-05 17:38:16] [Rank 0] step:8901/10000 train_time:330080ms step_avg:37.08ms
+[2025-09-05 17:38:16] [Rank 0] step:8921/10000 train_time:330737ms step_avg:37.07ms
+[2025-09-05 17:38:17] [Rank 0] step:8941/10000 train_time:331395ms step_avg:37.06ms
+[2025-09-05 17:38:18] [Rank 0] step:8961/10000 train_time:332053ms step_avg:37.06ms
+[2025-09-05 17:38:18] [Rank 0] step:8981/10000 train_time:332715ms step_avg:37.05ms
+[2025-09-05 17:38:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:38:19] [Rank 0] PRINT: step:9000/10000 train_loss:0.7044 val_loss:0.6943 train_time:333608ms step_avg:37.07ms
+[2025-09-05 17:38:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:38:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:39:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:39:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:39:42] [Rank 0] Total Loss: 4.9818
+[2025-09-05 17:39:42] [Rank 0] Total FTA (Unweighted): 0.8456
+[2025-09-05 17:39:42] [Rank 0] Total FTA (Weighted): 0.8456
+[2025-09-05 17:39:42] [Rank 0] Group 0 Loss: 4.7374
+[2025-09-05 17:39:42] [Rank 0] Group 1 Loss: 4.5791
+[2025-09-05 17:39:42] [Rank 0] Group 2 Loss: 4.5983
+[2025-09-05 17:39:42] [Rank 0] Group 3 Loss: 4.9461
+[2025-09-05 17:39:42] [Rank 0] Group 4 Loss: 4.8937
+[2025-09-05 17:39:42] [Rank 0] Group 5 Loss: 4.9144
+[2025-09-05 17:39:42] [Rank 0] Group 6 Loss: 4.8220
+[2025-09-05 17:39:42] [Rank 0] Group 7 Loss: 4.8887
+[2025-09-05 17:39:42] [Rank 0] Group 8 Loss: 4.9659
+[2025-09-05 17:39:42] [Rank 0] Group 9 Loss: 5.0041
+[2025-09-05 17:39:42] [Rank 0] Group 10 Loss: 5.1613
+[2025-09-05 17:39:42] [Rank 0] Group 11 Loss: 5.1534
+[2025-09-05 17:39:42] [Rank 0] Group 12 Loss: 5.1766
+[2025-09-05 17:39:42] [Rank 0] Group 13 Loss: 5.2923
+[2025-09-05 17:39:42] [Rank 0] Group 14 Loss: 5.2488
+[2025-09-05 17:39:42] [Rank 0] Group 15 Loss: 5.3259
+[2025-09-05 17:39:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:39:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:39:42] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:39:42] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:39:42] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:39:42] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:39:42] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:39:42] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:39:42] [Rank 0] Group 8 FTA: 0.9700
+[2025-09-05 17:39:42] [Rank 0] Group 9 FTA: 0.8500
+[2025-09-05 17:39:42] [Rank 0] Group 10 FTA: 0.9100
+[2025-09-05 17:39:42] [Rank 0] Group 11 FTA: 0.8400
+[2025-09-05 17:39:42] [Rank 0] Group 12 FTA: 0.8700
+[2025-09-05 17:39:42] [Rank 0] Group 13 FTA: 0.6300
+[2025-09-05 17:39:42] [Rank 0] Group 14 FTA: 0.3000
+[2025-09-05 17:39:42] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 17:39:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:39:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:39:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:39:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:39:43] [Rank 0] step:9001/10000 train_time:333616ms step_avg:37.06ms
+[2025-09-05 17:39:44] [Rank 0] step:9021/10000 train_time:334069ms step_avg:37.03ms
+[2025-09-05 17:39:44] [Rank 0] step:9041/10000 train_time:334728ms step_avg:37.02ms
+[2025-09-05 17:39:45] [Rank 0] step:9061/10000 train_time:335387ms step_avg:37.01ms
+[2025-09-05 17:39:46] [Rank 0] step:9081/10000 train_time:336046ms step_avg:37.01ms
+[2025-09-05 17:39:46] [Rank 0] step:9101/10000 train_time:336704ms step_avg:37.00ms
+[2025-09-05 17:39:47] [Rank 0] step:9121/10000 train_time:337363ms step_avg:36.99ms
+[2025-09-05 17:39:48] [Rank 0] step:9141/10000 train_time:338021ms step_avg:36.98ms
+[2025-09-05 17:39:48] [Rank 0] step:9161/10000 train_time:338679ms step_avg:36.97ms
+[2025-09-05 17:39:49] [Rank 0] step:9181/10000 train_time:339338ms step_avg:36.96ms
+[2025-09-05 17:39:50] [Rank 0] step:9201/10000 train_time:340170ms step_avg:36.97ms
+[2025-09-05 17:39:51] [Rank 0] step:9221/10000 train_time:340828ms step_avg:36.96ms
+[2025-09-05 17:39:51] [Rank 0] step:9241/10000 train_time:341487ms step_avg:36.95ms
+[2025-09-05 17:39:52] [Rank 0] step:9261/10000 train_time:342145ms step_avg:36.94ms
+[2025-09-05 17:39:53] [Rank 0] step:9281/10000 train_time:343014ms step_avg:36.96ms
+[2025-09-05 17:39:53] [Rank 0] step:9301/10000 train_time:343672ms step_avg:36.95ms
+[2025-09-05 17:39:54] [Rank 0] step:9321/10000 train_time:344330ms step_avg:36.94ms
+[2025-09-05 17:39:55] [Rank 0] step:9341/10000 train_time:344989ms step_avg:36.93ms
+[2025-09-05 17:39:55] [Rank 0] step:9361/10000 train_time:345648ms step_avg:36.92ms
+[2025-09-05 17:39:56] [Rank 0] step:9381/10000 train_time:346306ms step_avg:36.92ms
+[2025-09-05 17:39:57] [Rank 0] step:9401/10000 train_time:346963ms step_avg:36.91ms
+[2025-09-05 17:39:57] [Rank 0] step:9421/10000 train_time:347622ms step_avg:36.90ms
+[2025-09-05 17:39:58] [Rank 0] step:9441/10000 train_time:348281ms step_avg:36.89ms
+[2025-09-05 17:39:59] [Rank 0] step:9461/10000 train_time:348941ms step_avg:36.88ms
+[2025-09-05 17:39:59] [Rank 0] step:9481/10000 train_time:349598ms step_avg:36.87ms
+[2025-09-05 17:40:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:40:00] [Rank 0] PRINT: step:9500/10000 train_loss:0.6977 val_loss:0.6882 train_time:350490ms step_avg:36.89ms
+[2025-09-05 17:40:00] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:40:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:41:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:41:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:41:22] [Rank 0] Total Loss: 4.9974
+[2025-09-05 17:41:22] [Rank 0] Total FTA (Unweighted): 0.8481
+[2025-09-05 17:41:22] [Rank 0] Total FTA (Weighted): 0.8481
+[2025-09-05 17:41:22] [Rank 0] Group 0 Loss: 4.8234
+[2025-09-05 17:41:22] [Rank 0] Group 1 Loss: 4.6482
+[2025-09-05 17:41:22] [Rank 0] Group 2 Loss: 4.5522
+[2025-09-05 17:41:22] [Rank 0] Group 3 Loss: 4.9201
+[2025-09-05 17:41:22] [Rank 0] Group 4 Loss: 4.9805
+[2025-09-05 17:41:22] [Rank 0] Group 5 Loss: 4.9355
+[2025-09-05 17:41:22] [Rank 0] Group 6 Loss: 4.8246
+[2025-09-05 17:41:22] [Rank 0] Group 7 Loss: 4.8878
+[2025-09-05 17:41:22] [Rank 0] Group 8 Loss: 4.9875
+[2025-09-05 17:41:22] [Rank 0] Group 9 Loss: 4.9837
+[2025-09-05 17:41:22] [Rank 0] Group 10 Loss: 5.1776
+[2025-09-05 17:41:22] [Rank 0] Group 11 Loss: 5.1807
+[2025-09-05 17:41:22] [Rank 0] Group 12 Loss: 5.1565
+[2025-09-05 17:41:22] [Rank 0] Group 13 Loss: 5.3042
+[2025-09-05 17:41:22] [Rank 0] Group 14 Loss: 5.2705
+[2025-09-05 17:41:22] [Rank 0] Group 15 Loss: 5.3253
+[2025-09-05 17:41:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:41:22] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:41:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:41:22] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:41:22] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:41:22] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:41:22] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:41:22] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:41:22] [Rank 0] Group 8 FTA: 0.9700
+[2025-09-05 17:41:22] [Rank 0] Group 9 FTA: 0.8400
+[2025-09-05 17:41:22] [Rank 0] Group 10 FTA: 0.9100
+[2025-09-05 17:41:22] [Rank 0] Group 11 FTA: 0.8400
+[2025-09-05 17:41:22] [Rank 0] Group 12 FTA: 0.8800
+[2025-09-05 17:41:22] [Rank 0] Group 13 FTA: 0.6500
+[2025-09-05 17:41:23] [Rank 0] Group 14 FTA: 0.3400
+[2025-09-05 17:41:23] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-05 17:41:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:41:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:41:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:41:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:41:24] [Rank 0] step:9501/10000 train_time:350500ms step_avg:36.89ms
+[2025-09-05 17:41:25] [Rank 0] step:9521/10000 train_time:350941ms step_avg:36.86ms
+[2025-09-05 17:41:25] [Rank 0] step:9541/10000 train_time:351599ms step_avg:36.85ms
+[2025-09-05 17:41:26] [Rank 0] step:9561/10000 train_time:352257ms step_avg:36.84ms
+[2025-09-05 17:41:27] [Rank 0] step:9581/10000 train_time:352916ms step_avg:36.83ms
+[2025-09-05 17:41:27] [Rank 0] step:9601/10000 train_time:353574ms step_avg:36.83ms
+[2025-09-05 17:41:28] [Rank 0] step:9621/10000 train_time:354232ms step_avg:36.82ms
+[2025-09-05 17:41:29] [Rank 0] step:9641/10000 train_time:354890ms step_avg:36.81ms
+[2025-09-05 17:41:29] [Rank 0] step:9661/10000 train_time:355627ms step_avg:36.81ms
+[2025-09-05 17:41:30] [Rank 0] step:9681/10000 train_time:356285ms step_avg:36.80ms
+[2025-09-05 17:41:31] [Rank 0] step:9701/10000 train_time:356944ms step_avg:36.79ms
+[2025-09-05 17:41:31] [Rank 0] step:9721/10000 train_time:357600ms step_avg:36.79ms
+[2025-09-05 17:41:32] [Rank 0] step:9741/10000 train_time:358257ms step_avg:36.78ms
+[2025-09-05 17:41:33] [Rank 0] step:9761/10000 train_time:358915ms step_avg:36.77ms
+[2025-09-05 17:41:33] [Rank 0] step:9781/10000 train_time:359572ms step_avg:36.76ms
+[2025-09-05 17:41:34] [Rank 0] step:9801/10000 train_time:360230ms step_avg:36.75ms
+[2025-09-05 17:41:35] [Rank 0] step:9821/10000 train_time:360888ms step_avg:36.75ms
+[2025-09-05 17:41:35] [Rank 0] step:9841/10000 train_time:361546ms step_avg:36.74ms
+[2025-09-05 17:41:36] [Rank 0] step:9861/10000 train_time:362203ms step_avg:36.73ms
+[2025-09-05 17:41:37] [Rank 0] step:9881/10000 train_time:362862ms step_avg:36.72ms
+[2025-09-05 17:41:37] [Rank 0] step:9901/10000 train_time:363520ms step_avg:36.72ms
+[2025-09-05 17:41:38] [Rank 0] step:9921/10000 train_time:364177ms step_avg:36.71ms
+[2025-09-05 17:41:38] [Rank 0] step:9941/10000 train_time:364835ms step_avg:36.70ms
+[2025-09-05 17:41:39] [Rank 0] step:9961/10000 train_time:365494ms step_avg:36.69ms
+[2025-09-05 17:41:40] [Rank 0] step:9981/10000 train_time:366152ms step_avg:36.68ms
+[2025-09-05 17:41:40] [Rank 0] step:10000/10000 train_time:366777ms step_avg:36.68ms
+[2025-09-05 17:41:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:41:41] [Rank 0] PRINT: step:10000/10000 train_loss:0.6918 val_loss:0.6832 train_time:367051ms step_avg:36.71ms
+[2025-09-05 17:41:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:41:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:43:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:43:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:43:03] [Rank 0] Total Loss: 5.0088
+[2025-09-05 17:43:03] [Rank 0] Total FTA (Unweighted): 0.8581
+[2025-09-05 17:43:03] [Rank 0] Total FTA (Weighted): 0.8581
+[2025-09-05 17:43:03] [Rank 0] Group 0 Loss: 4.7821
+[2025-09-05 17:43:03] [Rank 0] Group 1 Loss: 4.6164
+[2025-09-05 17:43:03] [Rank 0] Group 2 Loss: 4.5286
+[2025-09-05 17:43:03] [Rank 0] Group 3 Loss: 4.9774
+[2025-09-05 17:43:03] [Rank 0] Group 4 Loss: 4.9847
+[2025-09-05 17:43:03] [Rank 0] Group 5 Loss: 4.9641
+[2025-09-05 17:43:03] [Rank 0] Group 6 Loss: 4.8466
+[2025-09-05 17:43:03] [Rank 0] Group 7 Loss: 4.9134
+[2025-09-05 17:43:03] [Rank 0] Group 8 Loss: 4.9920
+[2025-09-05 17:43:03] [Rank 0] Group 9 Loss: 5.0260
+[2025-09-05 17:43:03] [Rank 0] Group 10 Loss: 5.2063
+[2025-09-05 17:43:03] [Rank 0] Group 11 Loss: 5.2031
+[2025-09-05 17:43:03] [Rank 0] Group 12 Loss: 5.1649
+[2025-09-05 17:43:03] [Rank 0] Group 13 Loss: 5.3172
+[2025-09-05 17:43:03] [Rank 0] Group 14 Loss: 5.2766
+[2025-09-05 17:43:03] [Rank 0] Group 15 Loss: 5.3414
+[2025-09-05 17:43:03] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:43:03] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:43:03] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:43:03] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:43:03] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:43:03] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:43:03] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:43:03] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:43:03] [Rank 0] Group 8 FTA: 0.9700
+[2025-09-05 17:43:03] [Rank 0] Group 9 FTA: 0.8400
+[2025-09-05 17:43:03] [Rank 0] Group 10 FTA: 0.9100
+[2025-09-05 17:43:03] [Rank 0] Group 11 FTA: 0.8600
+[2025-09-05 17:43:03] [Rank 0] Group 12 FTA: 0.8800
+[2025-09-05 17:43:03] [Rank 0] Group 13 FTA: 0.7200
+[2025-09-05 17:43:03] [Rank 0] Group 14 FTA: 0.3500
+[2025-09-05 17:43:03] [Rank 0] Group 15 FTA: 0.2000
+[2025-09-05 17:43:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-09-05 17:43:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-09-05 17:43:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_loss_curve.png
+[2025-09-05 17:43:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_42/total_acc_curve.png
+[2025-09-05 17:43:05] [Rank 0] step:10001/10000 train_time:367060ms step_avg:36.70ms
+[2025-09-05 17:43:05] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 17:43:05 2025 ---
+[2025-09-05 17:43:05] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..88bd59e23bbb7329bb662f788d78e9aaa8ee1799
--- /dev/null
+++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/config.json
@@ -0,0 +1,29 @@
+{
+  "cli_args": {
+    "unet": false,
+    "seed": 43,
+    "optimizer_mode": 5,
+    "model_parameterization": "gated",
+    "per_group_k": 100,
+    "muon_lr": 0.01,
+    "adam_lr": 0.0005,
+    "base_dir": "logs_qa_adam_gated/lr_search_long",
+    "sgd_lr": 0.01,
+    "m_val": 15,
+    "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl"
+  },
+  "hyperparameters": {
+    "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin",
+    "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin",
+    "val_tokens": 491520,
+    "train_seq_len": 3072,
+    "val_seq_len": 16384,
+    "num_iterations": 10000,
+    "cooldown_frac": 0.8,
+    "vocab_size": 50257,
+    "val_loss_every": 500,
+    "save_checkpoint": false
+  },
+  "run_uuid_for_log": "a69344f7-bc12-4312-9f3b-2eddc3e1e931",
+  "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/fixed_eval_indices.json
new file mode 100644
index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6
--- /dev/null
+++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/fixed_eval_indices.json
@@ -0,0 +1 @@
+{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117,
1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 
781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 
434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 
1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..340d822021b3e66fe99cdfce550dd93a3c4dffef --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a537a0248e9c0fbc558edc9142d52e6ff627f6d25d57f930c063737c1dc20d5f +size 451850 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..7822aa6e02f2b0677d84b2e6ff8056228fad814f --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88db5d0e87ef6b66924c9c026bdc0bd853f8baf764aed300a7f55b35205a82fc +size 455084 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..20bacae78b6fef7d750738b30627585d707101eb --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1189a3753687af5089c9f884f3c67ec6e1532014e67d8712d1e7201b81bd575e +size 103653 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..7d617b98a8673953079d89e2d05b4397c933ebdd --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3132a778005c604593582c30b2995b6ada615e4912a60017004999cd572c9a8 +size 99624 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/training_log_a69344f7-bc12-4312-9f3b-2eddc3e1e931.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/training_log_a69344f7-bc12-4312-9f3b-2eddc3e1e931.txt new file mode 100644 index 0000000000000000000000000000000000000000..15b001f917aa272553f9c209d681065701cb73d4 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/training_log_a69344f7-bc12-4312-9f3b-2eddc3e1e931.txt @@ -0,0 +1,3820 @@ 
+[2025-09-05 20:05:29] [Rank 0] PRINT: --- Script Start: Fri Sep 5 20:05:29 2025 ---
+[2025-09-05 20:05:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.0005, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 20:05:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 20:05:29] [Rank 0] PRINT: Using fixed seed: 43
+[2025-09-05 20:05:29] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43
+[2025-09-05 20:05:29] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # cycle over the shards so multi-epoch training simply wraps around
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
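+# Hypothetical companion helper (a sketch, not part of the original run) showing the
+# shard layout that _load_data_shard asserts above: a 256-word int32 header holding
+# the magic number 20240520, version 1 and the token count, followed by uint16 ids.
+def write_data_shard(path: Path, token_ids):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520  # magic number checked by _load_data_shard
+    header[1] = 1  # format version checked by _load_data_shard
+    header[2] = len(token_ids)  # number of uint16 tokens that follow
+    with open(path, "wb") as f:
+        f.write(header.tobytes())
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())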
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP). "
+                         "Modes 9, 10, 13, 14, 15 and 16 are defined in the optimizer-setup section below."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 491520
+    train_seq_len = 3*1024
+    val_seq_len = 4*4*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000  # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500  # Original: 125
+    save_checkpoint = False  # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank)  # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Append each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ... (other initial logs)
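+# Hypothetical sanity check (a sketch, not in the original script): the config.json
+# written above round-trips the parsed CLI arguments of the run.
+if master_process:
+    with open(config_file_path) as _f:
+        _cfg = json.load(_f)
+    assert _cfg["cli_args"]["seed"] == exp_args.seed
+    assert _cfg["cli_args"]["optimizer_mode"] == exp_args.optimizer_mode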
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
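+# Worked example of the counts above, assuming m=3 for readability:
+#   group 0 -> 1 class   with 2**3 = 8 samples each
+#   group 1 -> 1 class   with 2**2 = 4 samples each
+#   group 2 -> 2 classes with 2**1 = 2 samples each
+#   group 3 -> 4 classes with 2**0 = 1 sample each
+# so generate_powerlaw_selection_counts(3) returns
+#   ({0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}, [0, 1, 2, 2, 3, 3, 3, 3])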
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA (first token of the answer) ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA: does the model's next token after the prompt match the answer? ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4.
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
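+# Reader's summary (a sketch, not used by the script) of the split configured above;
+# in every non-SGD mode Adam additionally owns the head, embedding and scalar params:
+#   mode 0  -> Muon: QK, VO, MLP | Adam matrices: none
+#   mode 5  -> Muon: none        | Adam matrices: QK, VO, MLP (the configuration of the run logged here)
+#   mode 9  -> plain SGD+momentum on all parameters (no Muon, no Adam)
+#   mode 16 -> Muon: QKV         | Adam matrices: W_O, MLP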
print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your GatedSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >= 2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr # LR for Muon; needed when Muon is constructed below (matches the qkvo branch) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
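+ # NOTE (sketch only): grouping convention used by the modes below, assuming the gated MLP computes roughly y = c_proj(act(c_fc(x)) * c_up(x)): + # mlp_w1_group == mlp_fc_params + mlp_up_params # "W_1": both inputs to the elementwise gate + # mlp_w2_group == mlp_proj_params # "W_2": the output projection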
elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 20:05:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # itertools.cycle over the shards enables multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write the message to the log file exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + # e.g. m=2 -> selection_counts == {0: 4, 1: 2, 2: 1, 3: 1}, class_groups == [0, 1, 2, 2] + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >= 2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: # Muon on W_2 MLP, W_O Attn + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach() / args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group()
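The get_lr above is a stable-then-decay schedule: the multiplier holds at 1.0 for the first (1 - cooldown_frac) of training, then decays linearly to 0.1. A minimal standalone sketch of the same shape, plugging in this run's num_iterations=10000 and cooldown_frac=0.8 (function name is illustrative):

def lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)  # clamp, as the script does for the final step
    if x < 1 - cooldown_frac:
        return 1.0                                 # stable phase: steps 0..1999 in this run
    w = (1 - x) / max(cooldown_frac, 1e-9)         # 1.0 at cooldown start, 0.0 at the end
    return w * 1.0 + (1 - w) * 0.1                 # linear decay from 1.0 down to 0.1

assert lr_multiplier(0) == 1.0 and lr_multiplier(2000) == 1.0
assert abs(lr_multiplier(10000) - 0.1) < 1e-9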
+[2025-09-05 20:05:29] [Rank 0] PRINT: Constructing model...
+[2025-09-05 20:05:31] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 20:05:31] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 20:05:31] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 20:05:35] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 20:05:35] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 20:05:35] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 20:05:35] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 20:05:35] [Rank 0] PRINT: Model returns:
+[2025-09-05 20:05:35] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 20:05:35] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 20:05:35] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005).
+[2025-09-05 20:05:35] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 20:05:35] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 20:05:35] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 20:05:40] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 20:05:40] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 20:06:20] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 20:06:20] [Rank 0] PRINT: Starting training...
+[2025-09-05 20:06:27] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/fixed_eval_indices.json
+[2025-09-05 20:06:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
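The divisibility warning above is easy to quantify from this run's config: val_tokens = 491520 and val_seq_len = 16384, and the logged val_batch_size of 65536 implies world_size = 4 (an inference from the numbers, not stated in the log). Integer division then gives 7 full validation steps, so 32768 tokens are skipped at every eval:

val_tokens = 491520
val_batch_size = 4 * 16384           # world_size * val_seq_len, matching the 65536 in the warning
val_num_steps = val_tokens // val_batch_size
assert val_num_steps == 7
assert val_tokens - val_num_steps * val_batch_size == 32768  # tokens missed per eval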
+[2025-09-05 20:06:31] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 20:07:05] [Rank 0] step:21/10000 train_time:34245ms step_avg:1630.73ms
+[2025-09-05 20:07:06] [Rank 0] step:41/10000 train_time:34892ms step_avg:851.03ms
+[2025-09-05 20:07:07] [Rank 0] step:61/10000 train_time:35536ms step_avg:582.56ms
+[2025-09-05 20:07:07] [Rank 0] step:81/10000 train_time:36182ms step_avg:446.69ms
+[2025-09-05 20:07:08] [Rank 0] step:101/10000 train_time:36826ms step_avg:364.61ms
+[2025-09-05 20:07:08] [Rank 0] step:121/10000 train_time:37470ms step_avg:309.67ms
+[2025-09-05 20:07:09] [Rank 0] step:141/10000 train_time:38114ms step_avg:270.31ms
+[2025-09-05 20:07:10] [Rank 0] step:161/10000 train_time:38757ms step_avg:240.73ms
+[2025-09-05 20:07:10] [Rank 0] step:181/10000 train_time:39402ms step_avg:217.69ms
+[2025-09-05 20:07:11] [Rank 0] step:201/10000 train_time:40047ms step_avg:199.24ms
+[2025-09-05 20:07:12] [Rank 0] step:221/10000 train_time:40691ms step_avg:184.12ms
+[2025-09-05 20:07:12] [Rank 0] step:241/10000 train_time:41336ms step_avg:171.52ms
+[2025-09-05 20:07:13] [Rank 0] step:261/10000 train_time:41982ms step_avg:160.85ms
+[2025-09-05 20:07:14] [Rank 0] step:281/10000 train_time:42626ms step_avg:151.69ms
+[2025-09-05 20:07:14] [Rank 0] step:301/10000 train_time:43270ms step_avg:143.75ms
+[2025-09-05 20:07:15] [Rank 0] step:321/10000 train_time:43914ms step_avg:136.80ms
+[2025-09-05 20:07:16] [Rank 0] step:341/10000 train_time:44558ms step_avg:130.67ms
+[2025-09-05 20:07:16] [Rank 0] step:361/10000 train_time:45201ms step_avg:125.21ms
+[2025-09-05 20:07:17] [Rank 0] step:381/10000 train_time:45845ms step_avg:120.33ms
+[2025-09-05 20:07:17] [Rank 0] step:401/10000 train_time:46490ms step_avg:115.93ms
+[2025-09-05 20:07:18] [Rank 0] step:421/10000 train_time:47135ms step_avg:111.96ms
+[2025-09-05 20:07:19] [Rank 0] step:441/10000 train_time:47779ms step_avg:108.34ms
+[2025-09-05 20:07:19] [Rank 0] step:461/10000 train_time:48424ms step_avg:105.04ms
+[2025-09-05 20:07:20] [Rank 0] step:481/10000 train_time:49069ms step_avg:102.01ms
+[2025-09-05 20:07:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:07:21] [Rank 0] PRINT: step:500/10000 train_loss:6.9222 val_loss:4.7797 train_time:49942ms step_avg:99.88ms
+[2025-09-05 20:07:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:07:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:08:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:08:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:08:42] [Rank 0] Total Loss: 5.8616
+[2025-09-05 20:08:42] [Rank 0] Total FTA (Unweighted): 0.0000
+[2025-09-05 20:08:42] [Rank 0] Total FTA (Weighted): 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 0 Loss: 4.9039
+[2025-09-05 20:08:42] [Rank 0] Group 1 Loss: 5.0290
+[2025-09-05 20:08:42] [Rank 0] Group 2 Loss: 5.2272
+[2025-09-05 20:08:42] [Rank 0] Group 3 Loss: 5.5164
+[2025-09-05 20:08:42] [Rank 0] Group 4 Loss: 5.7702
+[2025-09-05 20:08:42] [Rank 0] Group 5 Loss: 5.8718
+[2025-09-05 20:08:42] [Rank 0] Group 6 Loss: 5.9760
+[2025-09-05 20:08:42] [Rank 0] Group 7 Loss: 6.0101
+[2025-09-05 20:08:42] [Rank 0] Group 8 Loss: 6.1192
+[2025-09-05 20:08:42] [Rank 0] Group 9 Loss: 6.2070
+[2025-09-05 20:08:42] [Rank 0] Group 10 Loss: 6.2087
+[2025-09-05 20:08:42] [Rank 0] Group 11 Loss: 6.2488
+[2025-09-05 20:08:42] [Rank 0] Group 12 Loss: 6.1674
+[2025-09-05 20:08:42] [Rank 0] Group 13 Loss: 6.1738
+[2025-09-05 20:08:42] [Rank 0] Group 14 Loss: 6.2114
+[2025-09-05 20:08:42] [Rank 0] Group 15 Loss: 6.1442
+[2025-09-05 20:08:42] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 1 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 2 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 3 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 4 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 5 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 6 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 7 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 8 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 9 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 10 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 11 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 12 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 13 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 14 FTA: 0.0000
+[2025-09-05 20:08:42] [Rank 0] Group 15 FTA: 0.0000
+[2025-09-05 20:08:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-09-05 20:08:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-09-05 20:08:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png
+[2025-09-05 20:08:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png
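Note that the weighted and unweighted FTA totals agree at every eval (both 0.0000 here, both 0.1419 at the next one). That is expected: the fixed eval set was built with per_group_k = 100 samples for each of the 16 groups (1600 total), and with equal group sizes the sample-weighted mean equals the plain mean over groups. A hypothetical recomputation (helper and data names are illustrative, not from the script):

def total_fta(per_group_acc, per_group_n):
    groups = sorted(per_group_acc)
    unweighted = sum(per_group_acc[g] for g in groups) / len(groups)    # mean over groups
    weighted = (sum(per_group_acc[g] * per_group_n[g] for g in groups)
                / sum(per_group_n[g] for g in groups))                  # mean over samples
    return unweighted, weighted

acc = {g: 0.0 for g in range(16)}   # step-500 FTA values from the log above
n = {g: 100 for g in range(16)}     # per_group_k = 100 for every group
assert total_fta(acc, n) == (0.0, 0.0)  # equal group sizes keep both totals identical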
+[2025-09-05 20:08:44] [Rank 0] step:501/10000 train_time:49951ms step_avg:99.70ms
+[2025-09-05 20:08:44] [Rank 0] step:521/10000 train_time:50396ms step_avg:96.73ms
+[2025-09-05 20:08:45] [Rank 0] step:541/10000 train_time:51040ms step_avg:94.34ms
+[2025-09-05 20:08:46] [Rank 0] step:561/10000 train_time:51685ms step_avg:92.13ms
+[2025-09-05 20:08:46] [Rank 0] step:581/10000 train_time:52333ms step_avg:90.07ms
+[2025-09-05 20:08:47] [Rank 0] step:601/10000 train_time:52978ms step_avg:88.15ms
+[2025-09-05 20:08:48] [Rank 0] step:621/10000 train_time:53623ms step_avg:86.35ms
+[2025-09-05 20:08:48] [Rank 0] step:641/10000 train_time:54268ms step_avg:84.66ms
+[2025-09-05 20:08:49] [Rank 0] step:661/10000 train_time:54913ms step_avg:83.07ms
+[2025-09-05 20:08:50] [Rank 0] step:681/10000 train_time:55558ms step_avg:81.58ms
+[2025-09-05 20:08:50] [Rank 0] step:701/10000 train_time:56202ms step_avg:80.17ms
+[2025-09-05 20:08:51] [Rank 0] step:721/10000 train_time:56846ms step_avg:78.84ms
+[2025-09-05 20:08:52] [Rank 0] step:741/10000 train_time:57491ms step_avg:77.59ms
+[2025-09-05 20:08:52] [Rank 0] step:761/10000 train_time:58140ms step_avg:76.40ms
+[2025-09-05 20:08:53] [Rank 0] step:781/10000 train_time:58789ms step_avg:75.27ms
+[2025-09-05 20:08:53] [Rank 0] step:801/10000 train_time:59437ms step_avg:74.20ms
+[2025-09-05 20:08:55] [Rank 0] step:821/10000 train_time:60087ms step_avg:73.19ms
+[2025-09-05 20:08:55] [Rank 0] step:841/10000 train_time:61217ms step_avg:72.79ms
+[2025-09-05 20:08:56] [Rank 0] step:861/10000 train_time:61865ms step_avg:71.85ms
+[2025-09-05 20:08:57] [Rank 0] step:881/10000 train_time:62514ms step_avg:70.96ms
+[2025-09-05 20:08:57] [Rank 0] step:901/10000 train_time:63163ms step_avg:70.10ms
+[2025-09-05 20:08:58] [Rank 0] step:921/10000 train_time:63813ms step_avg:69.29ms
+[2025-09-05 20:08:58] [Rank 0] step:941/10000 train_time:64462ms step_avg:68.50ms
+[2025-09-05 20:08:59] [Rank 0] step:961/10000 train_time:65111ms step_avg:67.75ms
+[2025-09-05 20:09:00] [Rank 0] step:981/10000 train_time:65760ms step_avg:67.03ms
+[2025-09-05 20:09:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:09:01] [Rank 0] PRINT: step:1000/10000 train_loss:3.4396 val_loss:2.4387 train_time:66640ms step_avg:66.64ms
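The step_avg field in these lines is simply cumulative train_time divided by the printed step index, e.g. 66640 ms / 1000 = 66.64 ms at the step-1000 checkpoint above. A small parsing check (the regex is illustrative, not part of the training script):

import re

line = "step:1000/10000 train_loss:3.4396 val_loss:2.4387 train_time:66640ms step_avg:66.64ms"
m = re.search(r"step:(\d+)/\d+ .*train_time:(\d+)ms step_avg:([\d.]+)ms", line)
step, train_time, step_avg = int(m.group(1)), int(m.group(2)), float(m.group(3))
assert round(train_time / step, 2) == step_avg  # 66640 / 1000 == 66.64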
+[2025-09-05 20:10:21] [Rank 0] Group 15 Loss: 4.7776 +[2025-09-05 20:10:21] [Rank 0] Group 15 Loss: 4.7776 +[2025-09-05 20:10:21] [Rank 0] Group 0 FTA: 0.5000 +[2025-09-05 20:10:21] [Rank 0] Group 0 FTA: 0.5000 +[2025-09-05 20:10:21] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-05 20:10:21] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-05 20:10:21] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 20:10:21] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 20:10:21] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 20:10:21] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 20:10:21] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-05 20:10:21] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-05 20:10:21] [Rank 0] Group 5 FTA: 0.2000 +[2025-09-05 20:10:21] [Rank 0] Group 5 FTA: 0.2000 +[2025-09-05 20:10:21] [Rank 0] Group 6 FTA: 0.1100 +[2025-09-05 20:10:21] [Rank 0] Group 6 FTA: 0.1100 +[2025-09-05 20:10:21] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 20:10:21] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 20:10:21] [Rank 0] Group 8 FTA: 0.1200 +[2025-09-05 20:10:21] [Rank 0] Group 8 FTA: 0.1200 +[2025-09-05 20:10:21] [Rank 0] Group 9 FTA: 0.0900 +[2025-09-05 20:10:21] [Rank 0] Group 9 FTA: 0.0900 +[2025-09-05 20:10:21] [Rank 0] Group 10 FTA: 0.0900 +[2025-09-05 20:10:21] [Rank 0] Group 10 FTA: 0.0900 +[2025-09-05 20:10:21] [Rank 0] Group 11 FTA: 0.0800 +[2025-09-05 20:10:21] [Rank 0] Group 11 FTA: 0.0800 +[2025-09-05 20:10:21] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 20:10:21] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 20:10:21] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 20:10:21] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 20:10:21] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 20:10:21] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 20:10:21] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 20:10:21] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 20:10:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png +[2025-09-05 20:10:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png +[2025-09-05 20:10:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png +[2025-09-05 20:10:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png +[2025-09-05 20:10:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png +[2025-09-05 20:10:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png +[2025-09-05 20:10:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png +[2025-09-05 20:10:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png +[2025-09-05 20:10:23] [Rank 0] step:1001/10000 train_time:66649ms step_avg:66.58ms +[2025-09-05 20:10:23] [Rank 0] step:1001/10000 train_time:66649ms step_avg:66.58ms +[2025-09-05 20:10:24] [Rank 0] step:1021/10000 train_time:67098ms step_avg:65.72ms +[2025-09-05 20:10:24] [Rank 0] step:1021/10000 train_time:67098ms step_avg:65.72ms +[2025-09-05 20:10:25] [Rank 0] 
step:1041/10000 train_time:67747ms step_avg:65.08ms +[2025-09-05 20:10:25] [Rank 0] step:1041/10000 train_time:67747ms step_avg:65.08ms +[2025-09-05 20:10:25] [Rank 0] step:1061/10000 train_time:68397ms step_avg:64.46ms +[2025-09-05 20:10:25] [Rank 0] step:1061/10000 train_time:68397ms step_avg:64.46ms +[2025-09-05 20:10:26] [Rank 0] step:1081/10000 train_time:69047ms step_avg:63.87ms +[2025-09-05 20:10:26] [Rank 0] step:1081/10000 train_time:69047ms step_avg:63.87ms +[2025-09-05 20:10:27] [Rank 0] step:1101/10000 train_time:69696ms step_avg:63.30ms +[2025-09-05 20:10:27] [Rank 0] step:1101/10000 train_time:69696ms step_avg:63.30ms +[2025-09-05 20:10:27] [Rank 0] step:1121/10000 train_time:70345ms step_avg:62.75ms +[2025-09-05 20:10:27] [Rank 0] step:1121/10000 train_time:70345ms step_avg:62.75ms +[2025-09-05 20:10:28] [Rank 0] step:1141/10000 train_time:70996ms step_avg:62.22ms +[2025-09-05 20:10:28] [Rank 0] step:1141/10000 train_time:70996ms step_avg:62.22ms +[2025-09-05 20:10:29] [Rank 0] step:1161/10000 train_time:71645ms step_avg:61.71ms +[2025-09-05 20:10:29] [Rank 0] step:1161/10000 train_time:71645ms step_avg:61.71ms +[2025-09-05 20:10:29] [Rank 0] step:1181/10000 train_time:72294ms step_avg:61.21ms +[2025-09-05 20:10:29] [Rank 0] step:1181/10000 train_time:72294ms step_avg:61.21ms +[2025-09-05 20:10:30] [Rank 0] step:1201/10000 train_time:72944ms step_avg:60.74ms +[2025-09-05 20:10:30] [Rank 0] step:1201/10000 train_time:72944ms step_avg:60.74ms +[2025-09-05 20:10:31] [Rank 0] step:1221/10000 train_time:73593ms step_avg:60.27ms +[2025-09-05 20:10:31] [Rank 0] step:1221/10000 train_time:73593ms step_avg:60.27ms +[2025-09-05 20:10:31] [Rank 0] step:1241/10000 train_time:74243ms step_avg:59.82ms +[2025-09-05 20:10:31] [Rank 0] step:1241/10000 train_time:74243ms step_avg:59.82ms +[2025-09-05 20:10:32] [Rank 0] step:1261/10000 train_time:74892ms step_avg:59.39ms +[2025-09-05 20:10:32] [Rank 0] step:1261/10000 train_time:74892ms step_avg:59.39ms +[2025-09-05 20:10:33] [Rank 0] step:1281/10000 train_time:75695ms step_avg:59.09ms +[2025-09-05 20:10:33] [Rank 0] step:1281/10000 train_time:75695ms step_avg:59.09ms +[2025-09-05 20:10:33] [Rank 0] step:1301/10000 train_time:76344ms step_avg:58.68ms +[2025-09-05 20:10:33] [Rank 0] step:1301/10000 train_time:76344ms step_avg:58.68ms +[2025-09-05 20:10:34] [Rank 0] step:1321/10000 train_time:76994ms step_avg:58.28ms +[2025-09-05 20:10:34] [Rank 0] step:1321/10000 train_time:76994ms step_avg:58.28ms +[2025-09-05 20:10:35] [Rank 0] step:1341/10000 train_time:77642ms step_avg:57.90ms +[2025-09-05 20:10:35] [Rank 0] step:1341/10000 train_time:77642ms step_avg:57.90ms +[2025-09-05 20:10:36] [Rank 0] step:1361/10000 train_time:78515ms step_avg:57.69ms +[2025-09-05 20:10:36] [Rank 0] step:1361/10000 train_time:78515ms step_avg:57.69ms +[2025-09-05 20:10:36] [Rank 0] step:1381/10000 train_time:79165ms step_avg:57.32ms +[2025-09-05 20:10:36] [Rank 0] step:1381/10000 train_time:79165ms step_avg:57.32ms +[2025-09-05 20:10:37] [Rank 0] step:1401/10000 train_time:79815ms step_avg:56.97ms +[2025-09-05 20:10:37] [Rank 0] step:1401/10000 train_time:79815ms step_avg:56.97ms +[2025-09-05 20:10:37] [Rank 0] step:1421/10000 train_time:80464ms step_avg:56.62ms +[2025-09-05 20:10:37] [Rank 0] step:1421/10000 train_time:80464ms step_avg:56.62ms +[2025-09-05 20:10:38] [Rank 0] step:1441/10000 train_time:81114ms step_avg:56.29ms +[2025-09-05 20:10:38] [Rank 0] step:1441/10000 train_time:81114ms step_avg:56.29ms +[2025-09-05 20:10:39] [Rank 0] step:1461/10000 
train_time:81764ms step_avg:55.96ms
+[2025-09-05 20:10:39] [Rank 0] step:1481/10000 train_time:82413ms step_avg:55.65ms
+[2025-09-05 20:10:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:10:41] [Rank 0] PRINT: step:1500/10000 train_loss:1.9537 val_loss:1.5683 train_time:83295ms step_avg:55.53ms
+[2025-09-05 20:10:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:10:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:12:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:12:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:12:02] [Rank 0] Total Loss: 4.0530
+[2025-09-05 20:12:02] [Rank 0] Total FTA (Unweighted): 0.3269
+[2025-09-05 20:12:02] [Rank 0] Total FTA (Weighted): 0.3269
+[2025-09-05 20:12:02] [Rank 0] Group 0 Loss: 3.2728
+[2025-09-05 20:12:02] [Rank 0] Group 1 Loss: 3.0382
+[2025-09-05 20:12:02] [Rank 0] Group 2 Loss: 3.0010
+[2025-09-05 20:12:02] [Rank 0] Group 3 Loss: 3.4574
+[2025-09-05 20:12:02] [Rank 0] Group 4 Loss: 3.6071
+[2025-09-05 20:12:02] [Rank 0] Group 5 Loss: 3.8051
+[2025-09-05 20:12:02] [Rank 0] Group 6 Loss: 3.9984
+[2025-09-05 20:12:02] [Rank 0] Group 7 Loss: 4.1416
+[2025-09-05 20:12:02] [Rank 0] Group 8 Loss: 4.3622
+[2025-09-05 20:12:02] [Rank 0] Group 9 Loss: 4.4632
+[2025-09-05 20:12:02] [Rank 0] Group 10 Loss: 4.5494
+[2025-09-05 20:12:02] [Rank 0] Group 11 Loss: 4.5964
+[2025-09-05 20:12:02] [Rank 0] Group 12 Loss: 4.5931
+[2025-09-05 20:12:02] [Rank 0] Group 13 Loss: 4.6715
+[2025-09-05 20:12:02] [Rank 0] Group 14 Loss: 4.6573
+[2025-09-05 20:12:02] [Rank 0] Group 15 Loss: 4.6333
+[2025-09-05 20:12:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:12:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:12:02] [Rank 0] Group 2 FTA: 0.7800
+[2025-09-05 20:12:02] [Rank 0] Group 3 FTA: 0.6000
+[2025-09-05 20:12:02] [Rank 0] Group 4 FTA: 0.3200
+[2025-09-05 20:12:02] [Rank 0] Group 5 FTA: 0.2600
+[2025-09-05 20:12:02] [Rank 0] Group 6 FTA: 0.2200
+[2025-09-05 20:12:02] [Rank 0] Group 7 FTA: 0.1900
+[2025-09-05 20:12:02] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-05 20:12:02] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 20:12:02] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-05 20:12:02] [Rank 0] Group 11 FTA: 0.0700
+[2025-09-05 20:12:02] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 20:12:02] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 20:12:02] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 20:12:02] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 20:12:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-09-05 20:12:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-09-05 20:12:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png
+[2025-09-05 20:12:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png
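Editor's note on the metrics above: Total FTA (Unweighted) and Total FTA (Weighted) print identical values in every detailed-evaluation block of this log. That is expected here: the run config sets per_group_k to 100 and there are 16 groups (hence the "Fixed-eval set loaded with 1600 samples" line), so a sample-weighted mean over groups collapses to the plain mean. A minimal Python sketch of that identity, using the step-1500 per-group FTA values printed above (the variable names are illustrative, not taken from the training script):

# Why weighted == unweighted FTA when every group holds per_group_k = 100 samples.
group_fta = [1.00, 1.00, 0.78, 0.60, 0.32, 0.26, 0.22, 0.19,
             0.20, 0.12, 0.10, 0.07, 0.09, 0.10, 0.11, 0.07]  # step-1500 values
group_sizes = [100] * 16                                      # balanced groups

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
assert abs(unweighted - weighted) < 1e-12   # equal sizes cancel out of the ratio
print(f"{unweighted:.4f}")                  # 0.3269, matching the log line above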
+[2025-09-05 20:12:03] [Rank 0] step:1501/10000 train_time:83303ms step_avg:55.50ms
+[2025-09-05 20:12:04] [Rank 0] step:1521/10000 train_time:83730ms step_avg:55.05ms
+[2025-09-05 20:12:04] [Rank 0] step:1541/10000 train_time:84380ms step_avg:54.76ms
+[2025-09-05 20:12:05] [Rank 0] step:1561/10000 train_time:85029ms step_avg:54.47ms
+[2025-09-05 20:12:06] [Rank 0] step:1581/10000 train_time:85678ms step_avg:54.19ms
+[2025-09-05 20:12:06] [Rank 0] step:1601/10000 train_time:86328ms step_avg:53.92ms
+[2025-09-05 20:12:07] [Rank 0] step:1621/10000 train_time:86977ms step_avg:53.66ms
+[2025-09-05 20:12:08] [Rank 0] step:1641/10000 train_time:87808ms step_avg:53.51ms
+[2025-09-05 20:12:08] [Rank 0] step:1661/10000 train_time:88456ms step_avg:53.25ms
+[2025-09-05 20:12:09] [Rank 0] step:1681/10000 train_time:89105ms step_avg:53.01ms
+[2025-09-05 20:12:10] [Rank 0] step:1701/10000 train_time:89755ms step_avg:52.77ms
+[2025-09-05 20:12:10] [Rank 0] step:1721/10000 train_time:90404ms step_avg:52.53ms
+[2025-09-05 20:12:11] [Rank 0] step:1741/10000 train_time:91053ms step_avg:52.30ms
+[2025-09-05 20:12:12] [Rank 0] step:1761/10000 train_time:91702ms step_avg:52.07ms
+[2025-09-05 20:12:12] [Rank 0] step:1781/10000 train_time:92351ms step_avg:51.85ms
+[2025-09-05 20:12:13] [Rank 0] step:1801/10000 train_time:93000ms step_avg:51.64ms
+[2025-09-05 20:12:14] [Rank 0] step:1821/10000 train_time:93649ms step_avg:51.43ms
+[2025-09-05 20:12:14] [Rank 0] step:1841/10000 train_time:94299ms step_avg:51.22ms
+[2025-09-05 20:12:15] [Rank 0] step:1861/10000 train_time:94948ms step_avg:51.02ms
+[2025-09-05 20:12:16] [Rank 0] step:1881/10000 train_time:95597ms step_avg:50.82ms
+[2025-09-05 20:12:16] [Rank 0] step:1901/10000 train_time:96246ms step_avg:50.63ms
+[2025-09-05 20:12:17] [Rank 0] step:1921/10000 train_time:96896ms step_avg:50.44ms
+[2025-09-05 20:12:18] [Rank 0] step:1941/10000 train_time:97545ms step_avg:50.26ms
+[2025-09-05 20:12:18] [Rank 0] step:1961/10000 train_time:98195ms step_avg:50.07ms
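Editor's note: the step_avg column is consistent with cumulative train_time divided by the current step index rather than a moving average, which is why it keeps falling as the fixed startup cost is amortized. A one-line sketch of that assumed formula (not taken from the script), checked against two entries above:

# Assumed definition of step_avg: cumulative wall-clock time / current step.
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

print(f"{step_avg_ms(83295, 1500):.2f}ms")  # 55.53ms, as logged at step 1500
print(f"{step_avg_ms(98195, 1961):.2f}ms")  # 50.07ms, as logged at step 1961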
+[2025-09-05 20:12:19] [Rank 0] step:1981/10000 train_time:98845ms step_avg:49.90ms
+[2025-09-05 20:12:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:12:20] [Rank 0] PRINT: step:2000/10000 train_loss:1.3748 val_loss:1.2162 train_time:99726ms step_avg:49.86ms
+[2025-09-05 20:12:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:12:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:13:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:13:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:13:41] [Rank 0] Total Loss: 4.2747
+[2025-09-05 20:13:41] [Rank 0] Total FTA (Unweighted): 0.4512
+[2025-09-05 20:13:41] [Rank 0] Total FTA (Weighted): 0.4512
+[2025-09-05 20:13:41] [Rank 0] Group 0 Loss: 3.7121
+[2025-09-05 20:13:41] [Rank 0] Group 1 Loss: 3.5141
+[2025-09-05 20:13:41] [Rank 0] Group 2 Loss: 3.4458
+[2025-09-05 20:13:41] [Rank 0] Group 3 Loss: 3.7665
+[2025-09-05 20:13:41] [Rank 0] Group 4 Loss: 3.8016
+[2025-09-05 20:13:41] [Rank 0] Group 5 Loss: 3.9446
+[2025-09-05 20:13:41] [Rank 0] Group 6 Loss: 4.0722
+[2025-09-05 20:13:41] [Rank 0] Group 7 Loss: 4.2253
+[2025-09-05 20:13:41] [Rank 0] Group 8 Loss: 4.4907
+[2025-09-05 20:13:41] [Rank 0] Group 9 Loss: 4.5610
+[2025-09-05 20:13:41] [Rank 0] Group 10 Loss: 4.7400
+[2025-09-05 20:13:41] [Rank 0] Group 11 Loss: 4.7534
+[2025-09-05 20:13:41] [Rank 0] Group 12 Loss: 4.7680
+[2025-09-05 20:13:41] [Rank 0] Group 13 Loss: 4.9036
+[2025-09-05 20:13:41] [Rank 0] Group 14 Loss: 4.8375
+[2025-09-05 20:13:41] [Rank 0] Group 15 Loss: 4.8581
+[2025-09-05 20:13:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:13:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:13:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:13:41] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 20:13:41] [Rank 0] Group 4 FTA: 0.7500
+[2025-09-05 20:13:41] [Rank 0] Group 5 FTA: 0.5500
+[2025-09-05 20:13:41] [Rank 0] Group 6 FTA: 0.4400
+[2025-09-05 20:13:41] [Rank 0] Group 7 FTA: 0.3800
+[2025-09-05 20:13:41] [Rank 0] Group 8 FTA: 0.3300
+[2025-09-05 20:13:41] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 20:13:41] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-05 20:13:41] [Rank 0] Group 11 FTA: 0.0500
+[2025-09-05 20:13:41] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 20:13:41] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 20:13:41] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 20:13:41] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 20:13:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-09-05 20:13:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-09-05 20:13:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png
+[2025-09-05 20:13:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png
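Editor's note: the divisibility warning recurs before every 500-step validation pass because 491520 / 65536 = 7.5, so only 7 full validation batches fit and 32768 tokens go unevaluated each time. A small sketch of the check implied by the warning text (the variable names are illustrative, only the two values come from this log):

# Reproducing the arithmetic behind the recurring warning.
val_tokens = 491520      # "val_tokens" from the run's hyperparameters
val_batch_size = 65536   # value printed in the warning

full_batches, remainder = divmod(val_tokens, val_batch_size)
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible by "
          f"val_batch_size ({val_batch_size}). Some tokens might be missed.")
print(full_batches, remainder)  # 7 batches evaluated, 32768 tokens skipped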
+[2025-09-05 20:13:43] [Rank 0] step:2001/10000 train_time:99734ms step_avg:49.84ms
+[2025-09-05 20:13:44] [Rank 0] step:2021/10000 train_time:100378ms step_avg:49.67ms
+[2025-09-05 20:13:44] [Rank 0] step:2041/10000 train_time:101029ms step_avg:49.50ms
+[2025-09-05 20:13:45] [Rank 0] step:2061/10000 train_time:101678ms step_avg:49.33ms
+[2025-09-05 20:13:46] [Rank 0] step:2081/10000 train_time:102466ms step_avg:49.24ms
+[2025-09-05 20:13:46] [Rank 0] step:2101/10000 train_time:103116ms step_avg:49.08ms
+[2025-09-05 20:13:47] [Rank 0] step:2121/10000 train_time:103766ms step_avg:48.92ms
+[2025-09-05 20:13:48] [Rank 0] step:2141/10000 train_time:104415ms step_avg:48.77ms
+[2025-09-05 20:13:48] [Rank 0] step:2161/10000 train_time:105065ms step_avg:48.62ms
+[2025-09-05 20:13:49] [Rank 0] step:2181/10000 train_time:105714ms step_avg:48.47ms
+[2025-09-05 20:13:50] [Rank 0] step:2201/10000 train_time:106364ms step_avg:48.33ms
+[2025-09-05 20:13:50] [Rank 0] step:2221/10000 train_time:107013ms step_avg:48.18ms
+[2025-09-05 20:13:51] [Rank 0] step:2241/10000 train_time:107666ms step_avg:48.04ms
+[2025-09-05 20:13:52] [Rank 0] step:2261/10000 train_time:108323ms step_avg:47.91ms
+[2025-09-05 20:13:52] [Rank 0] step:2281/10000 train_time:108979ms step_avg:47.78ms
+[2025-09-05 20:13:53] [Rank 0] step:2301/10000 train_time:109634ms step_avg:47.65ms
+[2025-09-05 20:13:54] [Rank 0] step:2321/10000 train_time:110290ms step_avg:47.52ms
+[2025-09-05 20:13:54] [Rank 0] step:2341/10000 train_time:110946ms step_avg:47.39ms
+[2025-09-05 20:13:55] [Rank 0] step:2361/10000 train_time:111602ms step_avg:47.27ms
+[2025-09-05 20:13:55] [Rank 0] step:2381/10000 train_time:112257ms step_avg:47.15ms
+[2025-09-05 20:13:56] [Rank 0] step:2401/10000 train_time:112913ms step_avg:47.03ms
+[2025-09-05 20:13:57] [Rank 0] step:2421/10000 train_time:113568ms step_avg:46.91ms
+[2025-09-05 20:13:57] [Rank 0] step:2441/10000 train_time:114224ms step_avg:46.79ms
+[2025-09-05 20:13:58] [Rank 0] step:2461/10000 train_time:114880ms step_avg:46.68ms
+[2025-09-05 20:13:59] [Rank 0] step:2481/10000 train_time:115536ms step_avg:46.57ms
+[2025-09-05 20:13:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:14:00] [Rank 0] PRINT: step:2500/10000 train_loss:1.1378 val_loss:1.0570 train_time:116426ms step_avg:46.57ms
+[2025-09-05 20:14:00] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:14:00] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:15:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:15:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:15:21] [Rank 0] Total Loss: 4.3273
+[2025-09-05 20:15:21] [Rank 0] Total FTA (Unweighted): 0.5363
+[2025-09-05 20:15:21] [Rank 0] Total FTA (Weighted): 0.5363
+[2025-09-05 20:15:21] [Rank 0] Group 0 Loss: 3.9637
+[2025-09-05 20:15:21] [Rank 0] Group 1 Loss: 3.7692
+[2025-09-05 20:15:21] [Rank 0] Group 2 Loss: 3.6285
+[2025-09-05 20:15:21] [Rank 0] Group 3 Loss: 3.9195
+[2025-09-05 20:15:21] [Rank 0] Group 4 Loss: 3.8544
+[2025-09-05 20:15:21] [Rank 0] Group 5 Loss: 3.9921
+[2025-09-05 20:15:21] [Rank 0] Group 6 Loss: 4.0727
+[2025-09-05 20:15:21] [Rank 0] Group 7 Loss: 4.1673
+[2025-09-05 20:15:21] [Rank 0] Group 8 Loss: 4.4263
+[2025-09-05 20:15:21] [Rank 0] Group 9 Loss: 4.4838
+[2025-09-05 20:15:21] [Rank 0] Group 10 Loss: 4.7212
+[2025-09-05 20:15:21] [Rank 0] Group 11 Loss: 4.7479
+[2025-09-05 20:15:21] [Rank 0] Group 12 Loss: 4.7394
+[2025-09-05 20:15:21] [Rank 0] Group 13 Loss: 4.9134
+[2025-09-05 20:15:21] [Rank 0] Group 14 Loss: 4.8935
+[2025-09-05 20:15:21] [Rank 0] Group 15 Loss: 4.9432
+[2025-09-05 20:15:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:15:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:15:21] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:15:21] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 20:15:21] [Rank 0] Group 4 FTA: 0.9400
+[2025-09-05 20:15:21] [Rank 0] Group 5 FTA: 0.7800
+[2025-09-05 20:15:21] [Rank 0] Group 6 FTA: 0.5700
+[2025-09-05 20:15:21] [Rank 0] Group 7 FTA: 0.5500
+[2025-09-05 20:15:21] [Rank 0] Group 8 FTA: 0.5900
+[2025-09-05 20:15:21] [Rank 0] Group 9 FTA: 0.3300
+[2025-09-05 20:15:21] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 20:15:21] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 20:15:21] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 20:15:21] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 20:15:21] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 20:15:21] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 20:15:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-09-05 20:15:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-09-05 20:15:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png
+[2025-09-05 20:15:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png
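Editor's note: each "Fixed-eval set loaded with 1600 samples" line corresponds to the fixed_eval_indices.json committed alongside every run in this diff: a JSON object mapping each group id (string keys "0" through "15") to 100 dataset indices. A hedged sketch of consuming such a file; the loader itself is illustrative, only the file layout is taken from the JSON blobs in this diff:

import json

# fixed_eval_indices.json layout: {"0": [idx, ...], ..., "15": [...]},
# with 100 indices per group as configured by per_group_k.
with open("fixed_eval_indices.json") as f:
    indices_by_group = {int(k): v for k, v in json.load(f).items()}

assert len(indices_by_group) == 16
assert all(len(v) == 100 for v in indices_by_group.values())
total = sum(len(v) for v in indices_by_group.values())
print(f"Fixed-eval set loaded with {total} samples.")  # 1600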
+[2025-09-05 20:15:23] [Rank 0] step:2501/10000 train_time:116435ms step_avg:46.56ms
+[2025-09-05 20:15:23] [Rank 0] step:2521/10000 train_time:116884ms step_avg:46.36ms
+[2025-09-05 20:15:24] [Rank 0] step:2541/10000 train_time:117540ms step_avg:46.26ms
+[2025-09-05 20:15:25] [Rank 0] step:2561/10000 train_time:118195ms step_avg:46.15ms
+[2025-09-05 20:15:25] [Rank 0] step:2581/10000 train_time:118851ms step_avg:46.05ms
+[2025-09-05 20:15:26] [Rank 0] step:2601/10000 train_time:119507ms step_avg:45.95ms
+[2025-09-05 20:15:27] [Rank 0] step:2621/10000 train_time:120163ms step_avg:45.85ms
+[2025-09-05 20:15:27] [Rank 0] step:2641/10000 train_time:120819ms step_avg:45.75ms
+[2025-09-05 20:15:28] [Rank 0] step:2661/10000 train_time:121476ms step_avg:45.65ms
+[2025-09-05 20:15:29] [Rank 0] step:2681/10000 train_time:122132ms step_avg:45.55ms
+[2025-09-05 20:15:29] [Rank 0] step:2701/10000 train_time:122787ms step_avg:45.46ms
+[2025-09-05 20:15:30] [Rank 0] step:2721/10000 train_time:123443ms step_avg:45.37ms
+[2025-09-05 20:15:31] [Rank 0] step:2741/10000 train_time:124099ms step_avg:45.28ms
+[2025-09-05 20:15:31] [Rank 0] step:2761/10000 train_time:124755ms step_avg:45.18ms
+[2025-09-05 20:15:32] [Rank 0] step:2781/10000 train_time:125411ms step_avg:45.10ms
+[2025-09-05 20:15:33] [Rank 0] step:2801/10000 train_time:126067ms step_avg:45.01ms
+[2025-09-05 20:15:34] [Rank 0] step:2821/10000 train_time:126729ms step_avg:44.92ms
+[2025-09-05 20:15:34] [Rank 0] step:2841/10000 train_time:127844ms step_avg:45.00ms
+[2025-09-05 20:15:35] [Rank 0] step:2861/10000 train_time:128500ms step_avg:44.91ms
+[2025-09-05 20:15:36] [Rank 0] step:2881/10000 train_time:129156ms step_avg:44.83ms
+[2025-09-05 20:15:36] [Rank 0] step:2901/10000 train_time:129812ms step_avg:44.75ms
+[2025-09-05 20:15:37] [Rank 0] step:2921/10000 train_time:130468ms step_avg:44.67ms
+[2025-09-05 20:15:38] [Rank 0] step:2941/10000 train_time:131122ms step_avg:44.58ms
+[2025-09-05 20:15:38] [Rank 0] step:2961/10000 train_time:131779ms step_avg:44.50ms
+[2025-09-05 20:15:39] [Rank 0] step:2981/10000 train_time:132433ms step_avg:44.43ms
+[2025-09-05 20:15:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:15:40] [Rank 0] PRINT: step:3000/10000 train_loss:1.0196 val_loss:0.9712 train_time:133321ms step_avg:44.44ms
+[2025-09-05 20:15:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:15:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:17:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:17:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:17:01] [Rank 0] Total Loss: 4.3753
+[2025-09-05 20:17:01] [Rank 0] Total FTA (Unweighted): 0.5894
+[2025-09-05 20:17:01] [Rank 0] Total FTA (Weighted): 0.5894
+[2025-09-05 20:17:01] [Rank 0] Group 0 Loss: 4.0600
+[2025-09-05 20:17:01] [Rank 0] Group 1 Loss: 3.6925
+[2025-09-05 20:17:01] [Rank 0] Group 2 Loss: 3.7662
+[2025-09-05 20:17:01] [Rank 0] Group 3 Loss: 4.0534
+[2025-09-05 20:17:01] [Rank 0] Group 4 Loss: 3.9365
+[2025-09-05 20:17:01] [Rank 0] Group 5 Loss: 4.1072
+[2025-09-05 20:17:01] [Rank 0] Group 6 Loss: 4.1323
+[2025-09-05 20:17:01] [Rank 0] Group 7 Loss: 4.1322
+[2025-09-05 20:17:01] [Rank 0] Group 8 Loss: 4.4066
+[2025-09-05 20:17:01] [Rank 0] Group 9 Loss: 4.4463
+[2025-09-05 20:17:01] [Rank 0] Group 10 Loss: 4.7491
+[2025-09-05 20:17:01] [Rank 0] Group 11 Loss: 4.8163
+[2025-09-05 20:17:01] [Rank 0] Group 12 Loss: 4.8036
+[2025-09-05 20:17:01] [Rank 0] Group 13 Loss: 4.9574
+[2025-09-05 20:17:01] [Rank 0] Group 14 Loss: 4.9422
+[2025-09-05 20:17:01] [Rank 0] Group 15 Loss: 5.0027
+[2025-09-05 20:17:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:17:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:17:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:17:02] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 20:17:02] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 20:17:02] [Rank 0] Group 5 FTA: 0.9200
+[2025-09-05 20:17:02] [Rank 0] Group 6 FTA: 0.6500
+[2025-09-05 20:17:02] [Rank 0] Group 7 FTA: 0.5900
+[2025-09-05 20:17:02] [Rank 0] Group 8 FTA: 0.6600
+[2025-09-05 20:17:02] [Rank 0] Group 9 FTA: 0.4600
+[2025-09-05 20:17:02] [Rank 0] Group 10 FTA: 0.4300
+[2025-09-05 20:17:02] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 20:17:02] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 20:17:02] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 20:17:02] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 20:17:02] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:17:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-09-05 20:17:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-09-05 20:17:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png
+[2025-09-05 20:17:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png
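Editor's note: every detailed-evaluation block ends with the same four "[✓] ... curve updated and saved" lines, i.e. the script re-renders each PNG in place after appending the newest point. A hypothetical matplotlib sketch of that update pattern; the data structure and function name are assumptions, only the overwrite-in-place behavior is implied by the log:

import matplotlib
matplotlib.use("Agg")  # headless backend, as on a training node
import matplotlib.pyplot as plt

def update_per_class_loss_curve(history, out_path):
    # history: {group_id: [(step, loss), ...]} accumulated across evaluations.
    fig, ax = plt.subplots()
    for group_id, points in sorted(history.items()):
        steps, losses = zip(*points)
        ax.plot(steps, losses, label=f"Group {group_id}")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval loss")
    ax.legend(fontsize="x-small", ncol=2)
    fig.savefig(out_path)  # overwriting the same path => "updated and saved"
    plt.close(fig)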
+[2025-09-05 20:17:03] [Rank 0] step:3001/10000 train_time:133330ms step_avg:44.43ms
+[2025-09-05 20:17:04] [Rank 0] step:3021/10000 train_time:133764ms step_avg:44.28ms
+[2025-09-05 20:17:04] [Rank 0] step:3041/10000 train_time:134420ms step_avg:44.20ms
+[2025-09-05 20:17:05] [Rank 0] step:3061/10000 train_time:135078ms step_avg:44.13ms
+[2025-09-05 20:17:06] [Rank 0] step:3081/10000 train_time:135731ms step_avg:44.05ms
+[2025-09-05 20:17:06] [Rank 0] step:3101/10000 train_time:136388ms step_avg:43.98ms
+[2025-09-05 20:17:07] [Rank 0] step:3121/10000 train_time:137044ms step_avg:43.91ms
+[2025-09-05 20:17:08] [Rank 0] step:3141/10000 train_time:137699ms step_avg:43.84ms
+[2025-09-05 20:17:08] [Rank 0] step:3161/10000 train_time:138355ms step_avg:43.77ms
+[2025-09-05 20:17:09] [Rank 0] step:3181/10000 train_time:139011ms step_avg:43.70ms
+[2025-09-05 20:17:10] [Rank 0] step:3201/10000 train_time:139681ms step_avg:43.64ms
+[2025-09-05 20:17:10] [Rank 0] step:3221/10000 train_time:140337ms step_avg:43.57ms
+[2025-09-05 20:17:11] [Rank 0] step:3241/10000 train_time:140992ms step_avg:43.50ms
+[2025-09-05 20:17:12] [Rank 0] step:3261/10000 train_time:141650ms step_avg:43.44ms
+[2025-09-05 20:17:12] [Rank 0] step:3281/10000 train_time:142304ms step_avg:43.37ms
+[2025-09-05 20:17:13] [Rank 0] step:3301/10000 train_time:142960ms step_avg:43.31ms
+[2025-09-05 20:17:14] [Rank 0] step:3321/10000 train_time:143616ms step_avg:43.24ms
+[2025-09-05 20:17:14] [Rank 0] step:3341/10000 train_time:144271ms step_avg:43.18ms
+[2025-09-05 20:17:15] [Rank 0] step:3361/10000 train_time:144928ms step_avg:43.12ms
+[2025-09-05 20:17:15] [Rank 0] step:3381/10000 train_time:145584ms step_avg:43.06ms
+[2025-09-05 20:17:16] [Rank 0] step:3401/10000 train_time:146240ms step_avg:43.00ms
+[2025-09-05 20:17:17] [Rank 0] step:3421/10000 train_time:146896ms step_avg:42.94ms
+[2025-09-05 20:17:17] [Rank 0] step:3441/10000 train_time:147551ms step_avg:42.88ms
+[2025-09-05 20:17:18] [Rank 0] step:3461/10000 train_time:148207ms step_avg:42.82ms
+[2025-09-05 20:17:19] [Rank 0] step:3481/10000 train_time:148864ms step_avg:42.76ms
+[2025-09-05 20:17:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:17:20] [Rank 0] PRINT: step:3500/10000 train_loss:0.9477 val_loss:0.9124 train_time:149752ms step_avg:42.79ms
+[2025-09-05 20:17:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:17:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:18:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:18:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:18:41] [Rank 0] Total Loss: 4.4293
+[2025-09-05 20:18:41] [Rank 0] Total FTA (Unweighted): 0.6350
+[2025-09-05 20:18:41] [Rank 0] Total FTA (Weighted): 0.6350
+[2025-09-05 20:18:41] [Rank 0] Group 0 Loss: 4.3149
+[2025-09-05 20:18:41] [Rank 0] Group 1 Loss: 3.9319
+[2025-09-05 20:18:41] [Rank 0] Group 2 Loss: 3.8666
+[2025-09-05 20:18:41] [Rank 0] Group 3 Loss: 4.1578
+[2025-09-05 20:18:41] [Rank 0] Group 4 Loss: 4.0211
+[2025-09-05 20:18:41] [Rank 0] Group 5 Loss: 4.1870
+[2025-09-05 20:18:41] [Rank 0] Group 6 Loss: 4.2151
+[2025-09-05 20:18:41] [Rank 0] Group 7 Loss: 4.1836
+[2025-09-05 20:18:41] [Rank 0] Group 8 Loss: 4.4116
+[2025-09-05 20:18:41] [Rank 0] Group 9 Loss: 4.4371
+[2025-09-05 20:18:41] [Rank 0] Group 10 Loss: 4.7113
+[2025-09-05 20:18:41] [Rank 0] Group 11 Loss: 4.7435
+[2025-09-05 20:18:41] [Rank 0] Group 12 Loss: 4.7751
+[2025-09-05 20:18:41] [Rank 0] Group 13 Loss: 4.9312
+[2025-09-05 20:18:41] [Rank 0] Group 14 Loss: 4.9698
+[2025-09-05 20:18:41] [Rank 0] Group 15 Loss: 5.0111
+[2025-09-05 20:18:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:18:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:18:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:18:41] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 20:18:41] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 20:18:41] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 20:18:41] [Rank 0] Group 6 FTA: 0.7600
+[2025-09-05 20:18:41] [Rank 0] Group 7 FTA: 0.6600
+[2025-09-05 20:18:41] [Rank 0] Group 8 FTA: 0.7400
+[2025-09-05 20:18:41] [Rank 0] Group 9 FTA: 0.6200
+[2025-09-05 20:18:41] [Rank 0] Group 10 FTA: 0.5500
+[2025-09-05 20:18:41] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 20:18:41] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 20:18:41] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 20:18:41] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 20:18:41] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 20:18:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-09-05 20:18:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-09-05 20:18:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_loss_curve.png
+[2025-09-05 20:18:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.0005_seed_43/total_acc_curve.png
+[2025-09-05 20:18:42] [Rank 0] step:3501/10000 train_time:149761ms step_avg:42.78ms
+[2025-09-05 20:18:43] [Rank 0] step:3521/10000 train_time:150192ms step_avg:42.66ms
+[2025-09-05 20:18:43] [Rank 0] step:3541/10000 train_time:150848ms step_avg:42.60ms
+[2025-09-05 20:18:44] [Rank 0] step:3561/10000 train_time:151504ms step_avg:42.55ms
+[2025-09-05 20:18:45] [Rank 0] step:3581/10000 train_time:152160ms step_avg:42.49ms
+[2025-09-05 20:18:45] [Rank 0] step:3601/10000 train_time:152817ms step_avg:42.44ms
+[2025-09-05 20:18:46] [Rank 0] step:3621/10000 train_time:153473ms step_avg:42.38ms
+[2025-09-05 20:18:47] [Rank 0] step:3641/10000 train_time:154128ms step_avg:42.33ms
+[2025-09-05 20:18:47] [Rank 0] step:3661/10000 train_time:154783ms step_avg:42.28ms
+[2025-09-05 20:18:48] [Rank 0] step:3681/10000 train_time:155441ms step_avg:42.23ms
+[2025-09-05 20:18:49] [Rank 0] step:3701/10000 train_time:156094ms step_avg:42.18ms
+[2025-09-05 20:18:49] [Rank 0] step:3721/10000 train_time:156750ms step_avg:42.13ms
+[2025-09-05 20:18:50] [Rank 0] step:3741/10000 train_time:157406ms step_avg:42.08ms
+[2025-09-05 20:18:51] [Rank 0] step:3761/10000 train_time:158062ms step_avg:42.03ms
+[2025-09-05 20:18:51] [Rank 0] step:3781/10000 train_time:158719ms step_avg:41.98ms
+[2025-09-05 20:18:52] [Rank 0] step:3801/10000 train_time:159375ms step_avg:41.93ms
+[2025-09-05 20:18:53] [Rank 0] step:3821/10000 train_time:160031ms step_avg:41.88ms
+[2025-09-05 20:18:53] [Rank 0] step:3841/10000 train_time:160687ms step_avg:41.83ms
+[2025-09-05 20:18:54] [Rank 0] step:3861/10000 train_time:161343ms step_avg:41.79ms
+[2025-09-05 20:18:55] [Rank 0] step:3881/10000 train_time:161999ms step_avg:41.74ms
+[2025-09-05 20:18:55] [Rank 0] step:3901/10000 train_time:162655ms step_avg:41.70ms
+[2025-09-05 20:18:56] [Rank 0] step:3921/10000 train_time:163311ms step_avg:41.65ms
+[2025-09-05 20:18:57] [Rank 0] step:3941/10000 train_time:163966ms step_avg:41.61ms
+[2025-09-05 20:18:57] [Rank 0] step:3961/10000 train_time:164623ms step_avg:41.56ms
+[2025-09-05 20:18:58] [Rank 0] step:3981/10000 train_time:165278ms step_avg:41.52ms
+[2025-09-05 20:18:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:18:59] [Rank 0] PRINT: step:4000/10000 train_loss:0.8987 val_loss:0.8702 train_time:166391ms step_avg:41.60ms
+[2025-09-05 20:18:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:18:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b859ff472a5b863774cf7b3c3a42a0927d05134c
--- /dev/null
+++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/config.json
@@ -0,0 +1,29 @@
+{
+  "cli_args": {
+    "unet": false,
+    "seed": 42,
+    "optimizer_mode": 5,
+    "model_parameterization": "gated",
+    "per_group_k": 100,
+    "muon_lr": 0.01,
+    "adam_lr": 0.001,
+    "base_dir": "logs_qa_adam_gated/lr_search_long",
+    "sgd_lr": 0.01,
+    "m_val": 15,
+    "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl"
+  },
+  "hyperparameters": {
+    "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin",
+    "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin",
+    "val_tokens": 491520,
+    "train_seq_len": 3072,
+    "val_seq_len": 16384,
+    "num_iterations": 10000,
+    "cooldown_frac": 0.8,
+    "vocab_size": 50257,
+    "val_loss_every": 500,
+    "save_checkpoint": false
+  },
+  "run_uuid_for_log": "e096403c-9cd7-4630-a5fc-59af2a952f2a",
+  "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/fixed_eval_indices.json
new file mode 100644
index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6
--- /dev/null
+++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/fixed_eval_indices.json
@@ -0,0 +1 @@
+{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325,
338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 
1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 
670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 
1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..3541ae23976b442d610bdade8703821482a659f2 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbaa881ff204500fe704a89fcd3b136578f814106bfadb0e7d6faaa20469b57f +size 435572 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..ee0074d668251c32d9081194e3555f4fff5ae459 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40a7e66d996a5d0ea018d00ec9ff55faaf3856dae8e20c31c233ff5aed695744 +size 506788 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..0a9aa3163a517007f3bd54df2ef2a705a79edc34 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce9e85e755440ea57a8a8339bedb21d59d5d595955eba7890620cdb2d6b73467 +size 104336 diff --git 
a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..21c92f9f8ada4a8f2b43899ac2be4a422ad25709
--- /dev/null
+++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74e7cca9d3d058136a0198ef40f50eea3938d0dc79bad00a02935f6b799361e2
+size 118879
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/training_log_e096403c-9cd7-4630-a5fc-59af2a952f2a.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/training_log_e096403c-9cd7-4630-a5fc-59af2a952f2a.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0de91b3b87165e9529a2ce55533ebd557f0daedb
--- /dev/null
+++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/training_log_e096403c-9cd7-4630-a5fc-59af2a952f2a.txt
@@ -0,0 +1,5614 @@
+[2025-09-05 16:32:43] [Rank 0] PRINT: --- Script Start: Fri Sep 5 16:32:43 2025 ---
+[2025-09-05 16:32:43] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 16:32:43] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 16:32:43] [Rank 0] PRINT: Using fixed seed: 42
+[2025-09-05 16:32:43] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42
+[2025-09-05 16:32:43] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
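+
+# Note: the distributed loader below (_load_data_shard) expects nanogpt-style .bin shards:
+# a 256-int32 header (header[0]=20240520 magic, header[1]=1 version, header[2]=token count)
+# followed by the tokens themselves as uint16. A minimal writer sketch for producing a
+# compatible shard (hypothetical helper, not part of the original pipeline):
+#
+#     def write_data_shard(path, tokens):
+#         header = np.zeros(256, dtype=np.int32)
+#         header[0] = 20240520      # magic number checked by _load_data_shard
+#         header[1] = 1             # version checked by _load_data_shard
+#         header[2] = len(tokens)   # token count; the loader reads 2 * count bytes
+#         with open(path, "wb") as f:
+#             f.write(header.tobytes())
+#             f.write(np.asarray(tokens, dtype=np.uint16).tobytes())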
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
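+                         # (modes 9-16 are implemented in the optimizer setup below;
+                         # listed here so --help matches the actual dispatch)
+                         "9: SGD+momentum on ALL parameters (uses --sgd_lr). "
+                         "10: Muon(O Attn, MLP)/Adam(QK Attn, V Attn). "
+                         "13: Muon(W_O, W_2 MLP)/Adam(QK Attn, V Attn, W_1 MLP). "
+                         "14: Muon(W_O)/Adam(QK Attn, V Attn, MLP). "
+                         "15: Muon(W_V)/Adam(QK Attn, O Attn, MLP). "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."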
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # muon_lr is only assigned in the qkvo branch, so read the CLI value directly here
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
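+# [sketch] Why the fixed eval set is jitter-free: build_fixed_eval_indices
+# draws with random.Random(seed) (seed defaults to 2025 above), which is
+# deterministic, so re-running over the same jsonl picks identical per-group
+# line indices:
+import random
+pool = list(range(1_000_000))
+assert random.Random(2025).sample(pool, 100) == random.Random(2025).sample(pool, 100)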
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
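+# [sketch] Step count implied by the validation loop above for this run's
+# val_tokens=491520 and val_seq_len=16384; the world_size values below are
+# illustrative, since the log does not pin the GPU count down:
+val_tokens, val_seq_len = 491520, 16384
+for ws in (1, 2, 4, 8):
+    vb = ws * val_seq_len
+    print(ws, val_tokens // vb, val_tokens % vb)
+# -> 1 30 0 / 2 15 0 / 4 7 32768 / 8 3 98304; the non-zero remainders are
+#    exactly the cases where the divisibility warning above fires.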
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
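+# [sketch] The checkpoint written above is a plain dict (step / code / model
+# state / optimizer states). Toy CPU round-trip with a stand-in module; the
+# path is illustrative, not this run's layout:
+import torch
+toy = torch.nn.Linear(4, 4)
+toy_opt = torch.optim.SGD(toy.parameters(), lr=0.1)
+torch.save(dict(step=500, model=toy.state_dict(), optimizers=[toy_opt.state_dict()]),
+           "/tmp/ckpt_epoch_500.pt")
+restored = torch.load("/tmp/ckpt_epoch_500.pt", map_location="cpu")
+toy.load_state_dict(restored["model"])
+toy_opt.load_state_dict(restored["optimizers"][0])
+assert restored["step"] == 500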
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 16:32:43] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
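+# [sketch] The Muon momentum warmup in the training loop above ramps linearly
+# from 0.85 to 0.95 over the first 300 steps, then holds. Standalone copy for
+# illustration:
+def muon_momentum(step):
+    frac = min(step / 300, 1)
+    return (1 - frac) * 0.85 + frac * 0.95
+
+assert muon_momentum(0) == 0.85
+assert abs(muon_momentum(150) - 0.90) < 1e-9
+assert muon_momentum(300) == muon_momentum(10_000) == 0.95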
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
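+# [sketch] A writer for the shard format consumed by _load_data_shard above:
+# 256 int32 header words (magic 20240520, version 1, token count), then the
+# tokens as uint16. Path and data are illustrative.
+import numpy as np
+def write_data_shard(path, tokens):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520     # magic number the loader asserts on
+    header[1] = 1            # format version the loader asserts on
+    header[2] = len(tokens)  # token count; the loader reads 2*N bytes after the header
+    with open(path, "wb") as f:
+        f.write(header.tobytes())
+        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())
+
+write_data_shard("/tmp/train_000000.bin", [11, 22, 33, 50256])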
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message 
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
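+    # [sketch] Worked example of the padding above: a 200-token QA item is
+    # padded to padded_len = ceil(200 / 128) * 128 = 256; the input gets the
+    # pad token in positions 200..255, while the target holds tokens[1:] plus
+    # one pad and then -100 in the padded tail so cross_entropy ignores those
+    # positions. window_blocks is then 256 // 128 = 2.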
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
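+# [sketch] Why run_detailed_evaluation above reports two totals: with group A
+# at 90/100 correct and group B at 1/2 correct, the sample-weighted accuracy
+# is 91/102 while the unweighted per-group mean is (0.9 + 0.5) / 2:
+correct, total = {"A": 90, "B": 1}, {"A": 100, "B": 2}
+weighted = sum(correct.values()) / sum(total.values())
+unweighted = sum(correct[g] / total[g] for g in total) / len(total)
+assert abs(weighted - 91 / 102) < 1e-12 and abs(unweighted - 0.7) < 1e-12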
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
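+# [sketch] The stratified sampling above keeps every class represented while
+# shrinking the eval set by roughly num_samples / len(data). Self-contained
+# miniature (stratified_sample and the toy data are hypothetical):
+import random
+from collections import defaultdict
+def stratified_sample(items, key, target_n, seed=0):
+    rng = random.Random(seed)
+    by_class = defaultdict(list)
+    for it in items:
+        by_class[key(it)].append(it)
+    ratio = target_n / max(len(items), 1)
+    out = []
+    for _, grp in by_class.items():
+        k = max(1, int(len(grp) * ratio))  # at least one item per class
+        out.extend(rng.sample(grp, min(len(grp), k)))
+    return out
+
+data = [{"class_id": i % 5, "x": i} for i in range(100)]
+assert len(stratified_sample(data, lambda d: d["class_id"], 20)) == 20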
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert; would fail on the last step when step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
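Concretely, with the values from this run's config (num_iterations=10000, cooldown_frac=0.8), the multiplier stays at 1.0 for the first 2000 steps and then decays linearly down to 0.1; a quick standalone check:

# Standalone check of get_lr's shape, assuming num_iterations=10000, cooldown_frac=0.8.
for s in (0, 2000, 6000, 10000):
    x = s / 10000
    w = (1 - x) / 0.8
    print(s, 1.0 if x < 0.2 else w * 1.0 + (1 - w) * 0.1)  # 1.0, 1.0, 0.55, 0.1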
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation; 'model_compiled' does not exist yet
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+# Restore the pre-warmup state so warmup does not affect training
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter used when building the fixed eval set: keep only samples
+        # whose text parses as "<question>? Answer: <answer>"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
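Since each of the 16 groups in this run (m_val=15) contributes at most PER_GROUP_K=100 indices, the builder yields the 1600-sample fixed eval set reported in the log below; illustrative usage, names as defined above:

# Illustrative only; sizes match this run's log.
# fixed = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
# sum(len(v) for v in fixed.values())  # -> 1600 for 16 groups x 100 samples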
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
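The divisibility warning is live in this run: with the logged val_batch_size of 65536 (val_seq_len=16384, implying world_size=4), 491520 tokens split into 7 full validation steps and the remainder is silently skipped at every eval. The arithmetic, using values from this run's config and log:

# world_size=4 is inferred from 65536 / 16384 in the log below.
val_tokens, val_batch_size = 491520, 4 * 16384
print(val_tokens // val_batch_size)        # 7 validation steps per eval
print(val_tokens - 7 * val_batch_size)     # 32768 tokens skipped per eval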
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
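For reference, the nested structure that plot_curves consumes is metric -> group -> {step: value}; an illustrative slice after the step-500 eval, with values taken from the log below:

# history['total_loss']['500'] == 4.1538
# history['per_class_acc']['0']['500'] == 0.15
# plot_curves then draws one line per group, with the step strings on the x-axis.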
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+        #if last_step:
+        #    if master_process and args.save_checkpoint:
+        #        if run_dir_path_str:
+        #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+        #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+        #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+        #            log_checkpoint = dict(
+        #                step=step,
+        #                code=code,
+        #                model=model_compiled.state_dict(),
+        #                optimizers=[opt.state_dict() for opt in optimizers]
+        #            )
+        #            torch.save(log_checkpoint, str(checkpoint_path))
+        #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+        #        else:
+        #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+        #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Warm up Muon's momentum from 0.85 to 0.95 over the first 300 steps
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
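Worked values for the Muon momentum ramp in the training loop above (pure arithmetic, independent of the script); note that in this particular run (mode 5, all-Adam) optimizer2 is None, so the ramp is skipped:

# frac = min(step / 300, 1); momentum = (1 - frac) * 0.85 + frac * 0.95
for s in (0, 150, 300, 1000):
    frac = min(s / 300, 1)
    print(s, (1 - frac) * 0.85 + frac * 0.95)  # 0.85, 0.90, 0.95, 0.95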
+[2025-09-05 16:32:45] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 16:32:49] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 16:32:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 16:32:49] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 16:32:49] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 16:32:49] [Rank 0] PRINT: Model returns:
+[2025-09-05 16:32:49] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 16:32:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 16:32:49] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001).
+[2025-09-05 16:32:49] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 16:32:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 16:32:49] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 16:32:54] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 16:32:54] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 16:33:35] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 16:33:35] [Rank 0] PRINT: Starting training...
+[2025-09-05 16:33:41] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/fixed_eval_indices.json
+[2025-09-05 16:33:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:33:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 16:34:19] [Rank 0] step:21/10000 train_time:33260ms step_avg:1583.79ms
+[2025-09-05 16:34:19] [Rank 0] step:41/10000 train_time:33906ms step_avg:826.98ms
+[2025-09-05 16:34:20] [Rank 0] step:61/10000 train_time:34552ms step_avg:566.42ms
+[2025-09-05 16:34:21] [Rank 0] step:81/10000 train_time:35199ms step_avg:434.56ms
+[2025-09-05 16:34:21] [Rank 0] step:101/10000 train_time:35843ms step_avg:354.88ms
+[2025-09-05 16:34:22] [Rank 0] step:121/10000 train_time:36488ms step_avg:301.56ms
+[2025-09-05 16:34:22] [Rank 0] step:141/10000 train_time:37133ms step_avg:263.36ms
+[2025-09-05 16:34:23] [Rank 0] step:161/10000 train_time:37779ms step_avg:234.65ms
+[2025-09-05 16:34:24] [Rank 0] step:181/10000 train_time:38424ms step_avg:212.29ms
+[2025-09-05 16:34:24] [Rank 0] step:201/10000 train_time:39070ms step_avg:194.38ms
+[2025-09-05 16:34:25] [Rank 0] step:221/10000 train_time:39717ms step_avg:179.71ms
+[2025-09-05 16:34:26] [Rank 0] step:241/10000 train_time:40362ms step_avg:167.48ms
+[2025-09-05 16:34:26] [Rank 0] step:261/10000 train_time:41009ms step_avg:157.12ms
+[2025-09-05 16:34:27] [Rank 0] step:281/10000 train_time:41654ms step_avg:148.24ms
+[2025-09-05 16:34:28] [Rank 0] step:301/10000 train_time:42446ms step_avg:141.02ms
+[2025-09-05 16:34:28] [Rank 0] step:321/10000 train_time:43091ms step_avg:134.24ms
+[2025-09-05 16:34:29] [Rank 0] step:341/10000 train_time:43738ms step_avg:128.26ms
+[2025-09-05 16:34:30] [Rank 0] step:361/10000 train_time:44383ms step_avg:122.95ms
+[2025-09-05 16:34:31] [Rank 0] step:381/10000 train_time:45211ms step_avg:118.66ms
+[2025-09-05 16:34:31] [Rank 0] step:401/10000 train_time:45859ms step_avg:114.36ms
+[2025-09-05 16:34:32] [Rank 0] step:421/10000 train_time:46507ms step_avg:110.47ms
+[2025-09-05 16:34:33] [Rank 0] step:441/10000 train_time:47150ms step_avg:106.92ms
+[2025-09-05 16:34:33] [Rank 0] step:461/10000 train_time:47796ms step_avg:103.68ms
+[2025-09-05 16:34:34] [Rank 0] step:481/10000 train_time:48441ms step_avg:100.71ms
+[2025-09-05 16:34:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:34:35] [Rank 0] PRINT: step:500/10000 train_loss:5.1874 val_loss:2.3527 train_time:49318ms step_avg:98.64ms
+[2025-09-05 16:34:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:34:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:35:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:35:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:35:55] [Rank 0] Total Loss: 4.1538
+[2025-09-05 16:35:55] [Rank 0] Total FTA (Unweighted): 0.1200
+[2025-09-05 16:35:55] [Rank 0] Total FTA (Weighted): 0.1200
+[2025-09-05 16:35:55] [Rank 0] Group 0 Loss: 3.0411
+[2025-09-05 16:35:55] [Rank 0] Group 1 Loss: 2.9029
+[2025-09-05 16:35:55] [Rank 0] Group 2 Loss: 2.9673
+[2025-09-05 16:35:55] [Rank 0] Group 3 Loss: 3.4462
+[2025-09-05 16:35:55] [Rank 0] Group 4 Loss: 3.9328
+[2025-09-05 16:35:55] [Rank 0] Group 5 Loss: 4.1298
+[2025-09-05 16:35:55] [Rank 0] Group 6 Loss: 4.2620
+[2025-09-05 16:35:55] [Rank 0] Group 7 Loss: 4.3238
+[2025-09-05 16:35:55] [Rank 0] Group 8 Loss: 4.5170
+[2025-09-05 16:35:55] [Rank 0] Group 9 Loss: 4.6302
+[2025-09-05 16:35:55] [Rank 0] Group 10 Loss: 4.7063
+[2025-09-05 16:35:55] [Rank 0] Group 11 Loss: 4.7611
+[2025-09-05 16:35:55] [Rank 0] Group 12 Loss: 4.6935
+[2025-09-05 16:35:55] [Rank 0] Group 13 Loss: 4.7089
+[2025-09-05 16:35:55] [Rank 0] Group 14 Loss: 4.7585
+[2025-09-05 16:35:55] [Rank 0] Group 15 Loss: 4.6791
+[2025-09-05 16:35:55] [Rank 0] Group 0 FTA: 0.1500
+[2025-09-05 16:35:55] [Rank 0] Group 1 FTA: 0.3400
+[2025-09-05 16:35:55] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 16:35:55] [Rank 0] Group 3 FTA: 0.1300
+[2025-09-05 16:35:55] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 16:35:55] [Rank 0] Group 5 FTA: 0.1300
+[2025-09-05 16:35:55] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-05 16:35:55] [Rank 0] Group 7 FTA: 0.0600
+[2025-09-05 16:35:55] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-05 16:35:55] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-05 16:35:55] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-05 16:35:55] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 16:35:55] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 16:35:55] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 16:35:55] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 16:35:55] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:35:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:35:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:35:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:35:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:35:57] [Rank 0] step:501/10000 train_time:49327ms step_avg:98.46ms
+[2025-09-05 16:35:57] [Rank 0] step:521/10000 train_time:49769ms step_avg:95.53ms
+[2025-09-05 16:35:58] [Rank 0] step:541/10000 train_time:50414ms step_avg:93.19ms
+[2025-09-05 16:35:59] [Rank 0] step:561/10000 train_time:51059ms step_avg:91.01ms
+[2025-09-05 16:35:59] [Rank 0] step:581/10000 train_time:51705ms step_avg:88.99ms
+[2025-09-05 16:36:00] [Rank 0] step:601/10000 train_time:52350ms step_avg:87.10ms
+[2025-09-05 16:36:01] [Rank 0] step:621/10000 train_time:52995ms step_avg:85.34ms
+[2025-09-05 16:36:01] [Rank 0] step:641/10000 train_time:53639ms step_avg:83.68ms
+[2025-09-05 16:36:02] [Rank 0] step:661/10000 train_time:54288ms step_avg:82.13ms
+[2025-09-05 16:36:03] [Rank 0] step:681/10000 train_time:54933ms step_avg:80.67ms
+[2025-09-05 16:36:03] [Rank 0] step:701/10000 train_time:55578ms step_avg:79.28ms
+[2025-09-05 16:36:04] [Rank 0] step:721/10000 train_time:56223ms step_avg:77.98ms
+[2025-09-05 16:36:05] [Rank 0] step:741/10000 train_time:56868ms step_avg:76.74ms
+[2025-09-05 16:36:05] [Rank 0] step:761/10000 train_time:57516ms step_avg:75.58ms
+[2025-09-05 16:36:06] [Rank 0] step:781/10000 train_time:58165ms step_avg:74.47ms
+[2025-09-05 16:36:07] [Rank 0] step:801/10000 train_time:58815ms step_avg:73.43ms
+[2025-09-05 16:36:08] [Rank 0] step:821/10000 train_time:59464ms step_avg:72.43ms
+[2025-09-05 16:36:08] [Rank 0] step:841/10000 train_time:60568ms step_avg:72.02ms
+[2025-09-05 16:36:09] [Rank 0] step:861/10000 train_time:61217ms step_avg:71.10ms
+[2025-09-05 16:36:10] [Rank 0] step:881/10000 train_time:61867ms step_avg:70.22ms
+[2025-09-05 16:36:10] [Rank 0] step:901/10000 train_time:62517ms step_avg:69.39ms
+[2025-09-05 16:36:11] [Rank 0] step:921/10000 train_time:63167ms step_avg:68.58ms
+[2025-09-05 16:36:12] [Rank 0] step:941/10000 train_time:63816ms step_avg:67.82ms
+[2025-09-05 16:36:12] [Rank 0] step:961/10000 train_time:64465ms step_avg:67.08ms
+[2025-09-05 16:36:13] [Rank 0] step:981/10000 train_time:65115ms step_avg:66.38ms
+[2025-09-05 16:36:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:36:14] [Rank 0] PRINT: step:1000/10000 train_loss:1.6673 val_loss:1.2683 train_time:65996ms step_avg:66.00ms
+[2025-09-05 16:36:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:36:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:37:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:37:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:37:34] [Rank 0] Total Loss: 4.1073
+[2025-09-05 16:37:34] [Rank 0] Total FTA (Unweighted): 0.3862
+[2025-09-05 16:37:34] [Rank 0] Total FTA (Weighted): 0.3862
+[2025-09-05 16:37:34] [Rank 0] Group 0 Loss: 3.4905
+[2025-09-05 16:37:34] [Rank 0] Group 1 Loss: 3.3895
+[2025-09-05 16:37:34] [Rank 0] Group 2 Loss: 3.1513
+[2025-09-05 16:37:34] [Rank 0] Group 3 Loss: 3.5720
+[2025-09-05 16:37:34] [Rank 0] Group 4 Loss: 3.6595
+[2025-09-05 16:37:34] [Rank 0] Group 5 Loss: 3.7875
+[2025-09-05 16:37:34] [Rank 0] Group 6 Loss: 3.9267
+[2025-09-05 16:37:34] [Rank 0] Group 7 Loss: 4.0912
+[2025-09-05 16:37:34] [Rank 0] Group 8 Loss: 4.3297
+[2025-09-05 16:37:34] [Rank 0] Group 9 Loss: 4.3983
+[2025-09-05 16:37:34] [Rank 0] Group 10 Loss: 4.5664
+[2025-09-05 16:37:34] [Rank 0] Group 11 Loss: 4.6428
+[2025-09-05 16:37:34] [Rank 0] Group 12 Loss: 4.6397
+[2025-09-05 16:37:34] [Rank 0] Group 13 Loss: 4.7246
+[2025-09-05 16:37:34] [Rank 0] Group 14 Loss: 4.6790
+[2025-09-05 16:37:34] [Rank 0] Group 15 Loss: 4.6684
+[2025-09-05 16:37:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:37:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:37:34] [Rank 0] Group 2 FTA: 0.9000
+[2025-09-05 16:37:34] [Rank 0] Group 3 FTA: 0.9200
+[2025-09-05 16:37:34] [Rank 0] Group 4 FTA: 0.4600
+[2025-09-05 16:37:34] [Rank 0] Group 5 FTA: 0.3900
+[2025-09-05 16:37:34] [Rank 0] Group 6 FTA: 0.3100
+[2025-09-05 16:37:34] [Rank 0] Group 7 FTA: 0.2600
+[2025-09-05 16:37:34] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 16:37:34] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 16:37:34] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 16:37:34] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 16:37:34] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 16:37:34] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 16:37:34] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 16:37:34] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:37:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:37:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:37:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:37:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:37:36] [Rank 0] step:1001/10000 train_time:66005ms step_avg:65.94ms
+[2025-09-05 16:37:37] [Rank 0] step:1021/10000 train_time:66446ms step_avg:65.08ms
+[2025-09-05 16:37:37] [Rank 0] step:1041/10000 train_time:67096ms step_avg:64.45ms
+[2025-09-05 16:37:38] [Rank 0] step:1061/10000 train_time:67906ms step_avg:64.00ms
+[2025-09-05 16:37:39] [Rank 0] step:1081/10000 train_time:68558ms step_avg:63.42ms
+[2025-09-05 16:37:40] [Rank 0] step:1101/10000 train_time:69208ms step_avg:62.86ms
+[2025-09-05 16:37:40] [Rank 0] step:1121/10000 train_time:69859ms step_avg:62.32ms
+[2025-09-05 16:37:41] [Rank 0] step:1141/10000 train_time:70702ms step_avg:61.97ms
+[2025-09-05 16:37:42] [Rank 0] step:1161/10000 train_time:71352ms step_avg:61.46ms
+[2025-09-05 16:37:42] [Rank 0] step:1181/10000 train_time:72003ms step_avg:60.97ms
+[2025-09-05 16:37:43] [Rank 0] step:1201/10000 train_time:72653ms step_avg:60.49ms
+[2025-09-05 16:37:44] [Rank 0] step:1221/10000 train_time:73303ms step_avg:60.04ms
+[2025-09-05 16:37:44] [Rank 0] step:1241/10000 train_time:73954ms step_avg:59.59ms
+[2025-09-05 16:37:45] [Rank 0] step:1261/10000 train_time:74604ms step_avg:59.16ms
+[2025-09-05 16:37:46] [Rank 0] step:1281/10000 train_time:75255ms step_avg:58.75ms
+[2025-09-05 16:37:46] [Rank 0] step:1301/10000 train_time:75906ms step_avg:58.34ms
+[2025-09-05 16:37:47] [Rank 0] step:1321/10000 train_time:76556ms step_avg:57.95ms
+[2025-09-05 16:37:48] [Rank 0] step:1341/10000 train_time:77207ms step_avg:57.57ms
+[2025-09-05 16:37:48] [Rank 0] step:1361/10000 train_time:77859ms step_avg:57.21ms
+[2025-09-05 16:37:49] [Rank 0] step:1381/10000 train_time:78509ms step_avg:56.85ms
+[2025-09-05 16:37:50] [Rank 0] step:1401/10000 train_time:79160ms step_avg:56.50ms
+[2025-09-05 16:37:50] [Rank 0] step:1421/10000 train_time:79812ms step_avg:56.17ms
+[2025-09-05 16:37:51] [Rank 0] step:1441/10000 train_time:80463ms step_avg:55.84ms
+[2025-09-05 16:37:51] [Rank 0] step:1461/10000 train_time:81114ms step_avg:55.52ms
+[2025-09-05 16:37:52] [Rank 0] step:1481/10000 train_time:81765ms step_avg:55.21ms
+[2025-09-05 16:37:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:37:53] [Rank 0] PRINT: step:1500/10000 train_loss:1.1353 val_loss:1.0298 train_time:82648ms step_avg:55.10ms
+[2025-09-05 16:37:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:37:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:39:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:39:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:39:13] [Rank 0] Total Loss: 4.3606
+[2025-09-05 16:39:13] [Rank 0] Total FTA (Unweighted): 0.5175
+[2025-09-05 16:39:13] [Rank 0] Total FTA (Weighted): 0.5175
+[2025-09-05 16:39:13] [Rank 0] Group 0 Loss: 3.9179
+[2025-09-05 16:39:13] [Rank 0] Group 1 Loss: 3.8141
+[2025-09-05 16:39:13] [Rank 0] Group 2 Loss: 3.4696
+[2025-09-05 16:39:13] [Rank 0] Group 3 Loss: 3.9087
+[2025-09-05 16:39:13] [Rank 0] Group 4 Loss: 3.9644
+[2025-09-05 16:39:13] [Rank 0] Group 5 Loss: 4.0362
+[2025-09-05 16:39:13] [Rank 0] Group 6 Loss: 4.0640
+[2025-09-05 16:39:13] [Rank 0] Group 7 Loss: 4.2040
+[2025-09-05 16:39:13] [Rank 0] Group 8 Loss: 4.4411
+[2025-09-05 16:39:13] [Rank 0] Group 9 Loss: 4.4915
+[2025-09-05 16:39:13] [Rank 0] Group 10 Loss: 4.7651
+[2025-09-05 16:39:13] [Rank 0] Group 11 Loss: 4.7953
+[2025-09-05 16:39:13] [Rank 0] Group 12 Loss: 4.8781
+[2025-09-05 16:39:13] [Rank 0] Group 13 Loss: 5.0213
+[2025-09-05 16:39:13] [Rank 0] Group 14 Loss: 4.9749
+[2025-09-05 16:39:13] [Rank 0] Group 15 Loss: 5.0230
+[2025-09-05 16:39:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:39:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:39:13] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:39:13] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:39:13] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:39:13] [Rank 0] Group 5 FTA: 0.7600
+[2025-09-05 16:39:13] [Rank 0] Group 6 FTA: 0.5400
+[2025-09-05 16:39:13] [Rank 0] Group 7 FTA: 0.5500
+[2025-09-05 16:39:13] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 16:39:13] [Rank 0] Group 9 FTA: 0.3100
+[2025-09-05 16:39:13] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 16:39:13] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 16:39:13] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 16:39:13] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 16:39:14] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:39:14] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 16:39:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:39:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:39:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:39:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:39:15] [Rank 0] step:1501/10000 train_time:82656ms step_avg:55.07ms
+[2025-09-05 16:39:16] [Rank 0] step:1521/10000 train_time:83101ms step_avg:54.64ms
+[2025-09-05 16:39:16] [Rank 0] step:1541/10000 train_time:83751ms step_avg:54.35ms
+[2025-09-05 16:39:17] [Rank 0] step:1561/10000 train_time:84400ms step_avg:54.07ms
+[2025-09-05 16:39:17] [Rank 0] step:1581/10000 train_time:85049ms step_avg:53.79ms
+[2025-09-05 16:39:18] [Rank 0] step:1601/10000 train_time:85698ms step_avg:53.53ms
+[2025-09-05 16:39:19] [Rank 0] step:1621/10000 train_time:86347ms step_avg:53.27ms
+[2025-09-05 16:39:20] [Rank 0] step:1641/10000 train_time:87182ms step_avg:53.13ms
+[2025-09-05 16:39:20] [Rank 0] step:1661/10000 train_time:87831ms step_avg:52.88ms
+[2025-09-05 16:39:21] [Rank 0] step:1681/10000 train_time:88481ms step_avg:52.64ms
+[2025-09-05 16:39:22] [Rank 0] step:1701/10000 train_time:89130ms step_avg:52.40ms
+[2025-09-05 16:39:22] [Rank 0] step:1721/10000 train_time:89779ms step_avg:52.17ms
+[2025-09-05 16:39:23] [Rank 0] step:1741/10000 train_time:90428ms step_avg:51.94ms
+[2025-09-05 16:39:23] [Rank 0] step:1761/10000 train_time:91078ms step_avg:51.72ms
+[2025-09-05 16:39:24] [Rank 0] step:1781/10000 train_time:91728ms step_avg:51.50ms
+[2025-09-05 16:39:25] [Rank 0] step:1801/10000 train_time:92377ms step_avg:51.29ms
+[2025-09-05 16:39:25] [Rank 0] step:1821/10000 train_time:93026ms step_avg:51.08ms
+[2025-09-05 16:39:26] [Rank 0] step:1841/10000 train_time:93675ms step_avg:50.88ms
+[2025-09-05 16:39:27] [Rank 0] step:1861/10000 train_time:94323ms step_avg:50.68ms
+[2025-09-05 16:39:27] [Rank 0] step:1881/10000 train_time:94973ms step_avg:50.49ms
+[2025-09-05 16:39:28] [Rank 0] step:1901/10000 train_time:95623ms step_avg:50.30ms
+[2025-09-05 16:39:29] [Rank 0] step:1921/10000 train_time:96273ms step_avg:50.12ms
+[2025-09-05 16:39:29] [Rank 0] step:1941/10000 train_time:96922ms step_avg:49.93ms
+[2025-09-05 16:39:30] [Rank 0] step:1961/10000 train_time:97572ms step_avg:49.76ms
+[2025-09-05 16:39:31] [Rank 0] step:1981/10000 train_time:98221ms step_avg:49.58ms
+[2025-09-05 16:39:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:39:32] [Rank 0] PRINT: step:2000/10000 train_loss:0.9832 val_loss:0.9312 train_time:99101ms step_avg:49.55ms
+[2025-09-05 16:39:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:39:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:40:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:40:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:40:52] [Rank 0] Total Loss: 4.5558
+[2025-09-05 16:40:52] [Rank 0] Total FTA (Unweighted): 0.6100
+[2025-09-05 16:40:52] [Rank 0] Total FTA (Weighted): 0.6100
+[2025-09-05 16:40:52] [Rank 0] Group 0 Loss: 4.1950
+[2025-09-05 16:40:52] [Rank 0] Group 1 Loss: 4.0851
+[2025-09-05 16:40:52] [Rank 0] Group 2 Loss: 3.8777
+[2025-09-05 16:40:52] [Rank 0] Group 3 Loss: 4.2314
+[2025-09-05 16:40:52] [Rank 0] Group 4 Loss: 4.1736
+[2025-09-05 16:40:52] [Rank 0] Group 5 Loss: 4.2831
+[2025-09-05 16:40:52] [Rank 0] Group 6 Loss: 4.2579
+[2025-09-05 16:40:52] [Rank 0] Group 7 Loss: 4.3062
+[2025-09-05 16:40:52] [Rank 0] Group 8 Loss: 4.5082
+[2025-09-05 16:40:52] [Rank 0] Group 9 Loss: 4.5334
+[2025-09-05 16:40:52] [Rank 0] Group 10 Loss: 4.8044
+[2025-09-05 16:40:52] [Rank 0] Group 11 Loss: 4.9396
+[2025-09-05 16:40:52] [Rank 0] Group 12 Loss: 5.0093
+[2025-09-05 16:40:52] [Rank 0] Group 13 Loss: 5.2063
+[2025-09-05 16:40:53] [Rank 0] Group 14 Loss: 5.1857
+[2025-09-05 16:40:53] [Rank 0] Group 15 Loss: 5.2950
+[2025-09-05 16:40:53] [Rank 0] Group 0 FTA: 1.0000
Group 1 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 16:40:53] [Rank 0] Group 6 FTA: 0.8800 +[2025-09-05 16:40:53] [Rank 0] Group 6 FTA: 0.8800 +[2025-09-05 16:40:53] [Rank 0] Group 7 FTA: 0.7000 +[2025-09-05 16:40:53] [Rank 0] Group 7 FTA: 0.7000 +[2025-09-05 16:40:53] [Rank 0] Group 8 FTA: 0.7200 +[2025-09-05 16:40:53] [Rank 0] Group 8 FTA: 0.7200 +[2025-09-05 16:40:53] [Rank 0] Group 9 FTA: 0.5200 +[2025-09-05 16:40:53] [Rank 0] Group 9 FTA: 0.5200 +[2025-09-05 16:40:53] [Rank 0] Group 10 FTA: 0.2600 +[2025-09-05 16:40:53] [Rank 0] Group 10 FTA: 0.2600 +[2025-09-05 16:40:53] [Rank 0] Group 11 FTA: 0.1700 +[2025-09-05 16:40:53] [Rank 0] Group 11 FTA: 0.1700 +[2025-09-05 16:40:53] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 16:40:53] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 16:40:53] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 16:40:53] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 16:40:53] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 16:40:53] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 16:40:53] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 16:40:53] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 16:40:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png +[2025-09-05 16:40:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png +[2025-09-05 16:40:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png +[2025-09-05 16:40:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png +[2025-09-05 16:40:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png +[2025-09-05 16:40:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png +[2025-09-05 16:40:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png +[2025-09-05 16:40:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png +[2025-09-05 16:40:54] [Rank 0] step:2001/10000 train_time:99110ms step_avg:49.53ms +[2025-09-05 16:40:54] [Rank 0] step:2001/10000 train_time:99110ms step_avg:49.53ms +[2025-09-05 16:40:55] [Rank 0] step:2021/10000 train_time:99759ms step_avg:49.36ms +[2025-09-05 16:40:55] [Rank 0] step:2021/10000 train_time:99759ms step_avg:49.36ms +[2025-09-05 16:40:56] [Rank 0] step:2041/10000 train_time:100409ms step_avg:49.20ms +[2025-09-05 16:40:56] [Rank 0] step:2041/10000 train_time:100409ms step_avg:49.20ms +[2025-09-05 16:40:56] [Rank 0] step:2061/10000 train_time:101060ms step_avg:49.03ms +[2025-09-05 16:40:56] [Rank 0] step:2061/10000 train_time:101060ms 
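The warning printed before each validation pass is plain integer arithmetic: 491520 / 65536 = 7.5, so a loop that only consumes whole batches covers 7 full batches and skips the remainder. A minimal sketch of that bookkeeping, assuming the validation loop floors the batch count (the actual loader code is not part of this log):

# Sketch of the arithmetic behind the val_tokens warning (assumed whole-batch loop).
val_tokens = 491520       # value quoted in the warning
val_batch_size = 65536    # value quoted in the warning

full_batches = val_tokens // val_batch_size   # 7
covered = full_batches * val_batch_size       # 458752 tokens evaluated
missed = val_tokens - covered                 # 32768 tokens skipped per pass
if val_tokens % val_batch_size != 0:
    print(f"Warning: {missed} of {val_tokens} val tokens may be missed "
          f"({full_batches} full batches of {val_batch_size}).")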
+[2025-09-05 16:40:54] [Rank 0] step:2001/10000 train_time:99110ms step_avg:49.53ms
+[2025-09-05 16:40:55] [Rank 0] step:2021/10000 train_time:99759ms step_avg:49.36ms
+[2025-09-05 16:40:56] [Rank 0] step:2041/10000 train_time:100409ms step_avg:49.20ms
+[2025-09-05 16:40:56] [Rank 0] step:2061/10000 train_time:101060ms step_avg:49.03ms
+[2025-09-05 16:40:57] [Rank 0] step:2081/10000 train_time:101710ms step_avg:48.88ms
+[2025-09-05 16:40:57] [Rank 0] step:2101/10000 train_time:102361ms step_avg:48.72ms
+[2025-09-05 16:40:58] [Rank 0] step:2121/10000 train_time:103012ms step_avg:48.57ms
+[2025-09-05 16:40:59] [Rank 0] step:2141/10000 train_time:103662ms step_avg:48.42ms
+[2025-09-05 16:40:59] [Rank 0] step:2161/10000 train_time:104313ms step_avg:48.27ms
+[2025-09-05 16:41:00] [Rank 0] step:2181/10000 train_time:104963ms step_avg:48.13ms
+[2025-09-05 16:41:01] [Rank 0] step:2201/10000 train_time:105614ms step_avg:47.98ms
+[2025-09-05 16:41:01] [Rank 0] step:2221/10000 train_time:106264ms step_avg:47.85ms
+[2025-09-05 16:41:02] [Rank 0] step:2241/10000 train_time:106919ms step_avg:47.71ms
+[2025-09-05 16:41:03] [Rank 0] step:2261/10000 train_time:107577ms step_avg:47.58ms
+[2025-09-05 16:41:03] [Rank 0] step:2281/10000 train_time:108233ms step_avg:47.45ms
+[2025-09-05 16:41:04] [Rank 0] step:2301/10000 train_time:108891ms step_avg:47.32ms
+[2025-09-05 16:41:05] [Rank 0] step:2321/10000 train_time:109547ms step_avg:47.20ms
+[2025-09-05 16:41:05] [Rank 0] step:2341/10000 train_time:110205ms step_avg:47.08ms
+[2025-09-05 16:41:06] [Rank 0] step:2361/10000 train_time:110867ms step_avg:46.96ms
+[2025-09-05 16:41:07] [Rank 0] step:2381/10000 train_time:111524ms step_avg:46.84ms
+[2025-09-05 16:41:07] [Rank 0] step:2401/10000 train_time:112181ms step_avg:46.72ms
+[2025-09-05 16:41:08] [Rank 0] step:2421/10000 train_time:112838ms step_avg:46.61ms
+[2025-09-05 16:41:09] [Rank 0] step:2441/10000 train_time:113495ms step_avg:46.50ms
+[2025-09-05 16:41:09] [Rank 0] step:2461/10000 train_time:114152ms step_avg:46.38ms
+[2025-09-05 16:41:10] [Rank 0] step:2481/10000 train_time:114811ms step_avg:46.28ms
+[2025-09-05 16:41:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:41:11] [Rank 0] PRINT: step:2500/10000 train_loss:0.9073 val_loss:0.8674 train_time:115700ms step_avg:46.28ms
+[2025-09-05 16:41:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:41:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:42:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:42:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:42:32] [Rank 0] Total Loss: 4.7572
+[2025-09-05 16:42:32] [Rank 0] Total FTA (Unweighted): 0.6481
+[2025-09-05 16:42:32] [Rank 0] Total FTA (Weighted): 0.6481
+[2025-09-05 16:42:32] [Rank 0] Group 0 Loss: 4.6190
+[2025-09-05 16:42:32] [Rank 0] Group 1 Loss: 4.1193
+[2025-09-05 16:42:32] [Rank 0] Group 2 Loss: 4.0592
+[2025-09-05 16:42:32] [Rank 0] Group 3 Loss: 4.4626
+[2025-09-05 16:42:32] [Rank 0] Group 4 Loss: 4.5193
+[2025-09-05 16:42:32] [Rank 0] Group 5 Loss: 4.5279
+[2025-09-05 16:42:32] [Rank 0] Group 6 Loss: 4.4403
+[2025-09-05 16:42:32] [Rank 0] Group 7 Loss: 4.5303
+[2025-09-05 16:42:32] [Rank 0] Group 8 Loss: 4.7234
+[2025-09-05 16:42:32] [Rank 0] Group 9 Loss: 4.7167
+[2025-09-05 16:42:32] [Rank 0] Group 10 Loss: 4.9295
+[2025-09-05 16:42:32] [Rank 0] Group 11 Loss: 5.0744
+[2025-09-05 16:42:32] [Rank 0] Group 12 Loss: 5.1617
+[2025-09-05 16:42:32] [Rank 0] Group 13 Loss: 5.3258
+[2025-09-05 16:42:32] [Rank 0] Group 14 Loss: 5.3940
+[2025-09-05 16:42:32] [Rank 0] Group 15 Loss: 5.5112
+[2025-09-05 16:42:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:42:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:42:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:42:33] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:42:33] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:42:33] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:42:33] [Rank 0] Group 6 FTA: 0.9800
+[2025-09-05 16:42:33] [Rank 0] Group 7 FTA: 0.8000
+[2025-09-05 16:42:33] [Rank 0] Group 8 FTA: 0.7800
+[2025-09-05 16:42:33] [Rank 0] Group 9 FTA: 0.6700
+[2025-09-05 16:42:33] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 16:42:33] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 16:42:33] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 16:42:33] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 16:42:33] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 16:42:33] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:42:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:42:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:42:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:42:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
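Unweighted and weighted Total FTA coincide here (0.6481 both) because the fixed-eval set appears balanced: 1600 samples over 16 groups leaves 100 per group, so the sample-weighted mean equals the plain mean of the per-group scores. A small sketch of the two aggregations, with equal group sizes as an assumption inferred from the 1600-sample load message; the occasional one-ten-thousandth gap later in the log (0.7162 vs 0.7163 at step 3500) looks like rounding of the per-sample averages:

# Sketch: unweighted vs. weighted FTA aggregation (group sizes assumed equal).
group_fta = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.98, 0.80,
             0.78, 0.67, 0.49, 0.18, 0.12, 0.11, 0.15, 0.09]  # step 2500 values
group_sizes = [100] * 16  # assumed: 1600 fixed-eval samples / 16 groups

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
print(round(unweighted, 4), round(weighted, 4))  # 0.6481 0.6481, as logged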
+[2025-09-05 16:42:34] [Rank 0] step:2501/10000 train_time:115708ms step_avg:46.26ms
+[2025-09-05 16:42:35] [Rank 0] step:2521/10000 train_time:116137ms step_avg:46.07ms
+[2025-09-05 16:42:35] [Rank 0] step:2541/10000 train_time:116794ms step_avg:45.96ms
+[2025-09-05 16:42:36] [Rank 0] step:2561/10000 train_time:117450ms step_avg:45.86ms
+[2025-09-05 16:42:37] [Rank 0] step:2581/10000 train_time:118107ms step_avg:45.76ms
+[2025-09-05 16:42:37] [Rank 0] step:2601/10000 train_time:118764ms step_avg:45.66ms
+[2025-09-05 16:42:38] [Rank 0] step:2621/10000 train_time:119420ms step_avg:45.56ms
+[2025-09-05 16:42:39] [Rank 0] step:2641/10000 train_time:120076ms step_avg:45.47ms
+[2025-09-05 16:42:39] [Rank 0] step:2661/10000 train_time:120735ms step_avg:45.37ms
+[2025-09-05 16:42:40] [Rank 0] step:2681/10000 train_time:121391ms step_avg:45.28ms
+[2025-09-05 16:42:41] [Rank 0] step:2701/10000 train_time:122047ms step_avg:45.19ms
+[2025-09-05 16:42:41] [Rank 0] step:2721/10000 train_time:122704ms step_avg:45.10ms
+[2025-09-05 16:42:42] [Rank 0] step:2741/10000 train_time:123360ms step_avg:45.01ms
+[2025-09-05 16:42:43] [Rank 0] step:2761/10000 train_time:124017ms step_avg:44.92ms
+[2025-09-05 16:42:43] [Rank 0] step:2781/10000 train_time:124673ms step_avg:44.83ms
+[2025-09-05 16:42:44] [Rank 0] step:2801/10000 train_time:125330ms step_avg:44.74ms
+[2025-09-05 16:42:45] [Rank 0] step:2821/10000 train_time:125994ms step_avg:44.66ms
+[2025-09-05 16:42:46] [Rank 0] step:2841/10000 train_time:127118ms step_avg:44.74ms
+[2025-09-05 16:42:46] [Rank 0] step:2861/10000 train_time:127775ms step_avg:44.66ms
+[2025-09-05 16:42:47] [Rank 0] step:2881/10000 train_time:128430ms step_avg:44.58ms
+[2025-09-05 16:42:48] [Rank 0] step:2901/10000 train_time:129087ms step_avg:44.50ms
+[2025-09-05 16:42:48] [Rank 0] step:2921/10000 train_time:129743ms step_avg:44.42ms
+[2025-09-05 16:42:49] [Rank 0] step:2941/10000 train_time:130400ms step_avg:44.34ms
+[2025-09-05 16:42:50] [Rank 0] step:2961/10000 train_time:131055ms step_avg:44.26ms
+[2025-09-05 16:42:50] [Rank 0] step:2981/10000 train_time:131711ms step_avg:44.18ms
+[2025-09-05 16:42:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:42:51] [Rank 0] PRINT: step:3000/10000 train_loss:0.8555 val_loss:0.8274 train_time:132601ms step_avg:44.20ms
+[2025-09-05 16:42:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:42:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:44:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:44:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:44:12] [Rank 0] Total Loss: 4.6929
+[2025-09-05 16:44:12] [Rank 0] Total FTA (Unweighted): 0.6875
+[2025-09-05 16:44:12] [Rank 0] Total FTA (Weighted): 0.6875
+[2025-09-05 16:44:12] [Rank 0] Group 0 Loss: 4.4880
+[2025-09-05 16:44:12] [Rank 0] Group 1 Loss: 4.2491
+[2025-09-05 16:44:12] [Rank 0] Group 2 Loss: 4.0974
+[2025-09-05 16:44:12] [Rank 0] Group 3 Loss: 4.4892
+[2025-09-05 16:44:12] [Rank 0] Group 4 Loss: 4.4392
+[2025-09-05 16:44:12] [Rank 0] Group 5 Loss: 4.5392
+[2025-09-05 16:44:12] [Rank 0] Group 6 Loss: 4.4097
+[2025-09-05 16:44:12] [Rank 0] Group 7 Loss: 4.5085
+[2025-09-05 16:44:12] [Rank 0] Group 8 Loss: 4.6518
+[2025-09-05 16:44:12] [Rank 0] Group 9 Loss: 4.6132
+[2025-09-05 16:44:12] [Rank 0] Group 10 Loss: 4.7827
+[2025-09-05 16:44:12] [Rank 0] Group 11 Loss: 4.9500
+[2025-09-05 16:44:12] [Rank 0] Group 12 Loss: 5.0896
+[2025-09-05 16:44:12] [Rank 0] Group 13 Loss: 5.1942
+[2025-09-05 16:44:12] [Rank 0] Group 14 Loss: 5.2210
+[2025-09-05 16:44:12] [Rank 0] Group 15 Loss: 5.3642
+[2025-09-05 16:44:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:44:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:44:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:44:12] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:44:12] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:44:12] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:44:12] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:44:12] [Rank 0] Group 7 FTA: 0.9100
+[2025-09-05 16:44:12] [Rank 0] Group 8 FTA: 0.7700
+[2025-09-05 16:44:12] [Rank 0] Group 9 FTA: 0.7300
+[2025-09-05 16:44:12] [Rank 0] Group 10 FTA: 0.6800
+[2025-09-05 16:44:12] [Rank 0] Group 11 FTA: 0.3500
+[2025-09-05 16:44:12] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 16:44:12] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 16:44:12] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:44:12] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:44:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:44:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:44:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:44:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:44:14] [Rank 0] step:3001/10000 train_time:132610ms step_avg:44.19ms
+[2025-09-05 16:44:14] [Rank 0] step:3021/10000 train_time:133055ms step_avg:44.04ms
+[2025-09-05 16:44:15] [Rank 0] step:3041/10000 train_time:133712ms step_avg:43.97ms
+[2025-09-05 16:44:16] [Rank 0] step:3061/10000 train_time:134369ms step_avg:43.90ms
+[2025-09-05 16:44:16] [Rank 0] step:3081/10000 train_time:135026ms step_avg:43.83ms
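The step_avg column is simply cumulative train_time divided by the step index, which is why it decays smoothly while the per-20-step increment stays near 650-660ms; the detailed evaluations are excluded from train_time (132601ms at step 3000 vs 132610ms at step 3001, despite the ~80s evaluation in between). A one-line sketch of that calculation:

# step_avg as printed in the log lines: cumulative training time / step index.
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

print(f"{step_avg_ms(132601, 3000):.2f}ms")  # 44.20ms, matches step:3000 above
print(f"{step_avg_ms(132610, 3001):.2f}ms")  # 44.19ms, matches step:3001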
+[2025-09-05 16:44:17] [Rank 0] step:3101/10000 train_time:135683ms step_avg:43.75ms
+[2025-09-05 16:44:18] [Rank 0] step:3121/10000 train_time:136339ms step_avg:43.68ms
+[2025-09-05 16:44:18] [Rank 0] step:3141/10000 train_time:136996ms step_avg:43.62ms
+[2025-09-05 16:44:19] [Rank 0] step:3161/10000 train_time:137653ms step_avg:43.55ms
+[2025-09-05 16:44:19] [Rank 0] step:3181/10000 train_time:138310ms step_avg:43.48ms
+[2025-09-05 16:44:20] [Rank 0] step:3201/10000 train_time:138967ms step_avg:43.41ms
+[2025-09-05 16:44:21] [Rank 0] step:3221/10000 train_time:139624ms step_avg:43.35ms
+[2025-09-05 16:44:21] [Rank 0] step:3241/10000 train_time:140281ms step_avg:43.28ms
+[2025-09-05 16:44:22] [Rank 0] step:3261/10000 train_time:140938ms step_avg:43.22ms
+[2025-09-05 16:44:23] [Rank 0] step:3281/10000 train_time:141596ms step_avg:43.16ms
+[2025-09-05 16:44:23] [Rank 0] step:3301/10000 train_time:142253ms step_avg:43.09ms
+[2025-09-05 16:44:24] [Rank 0] step:3321/10000 train_time:142909ms step_avg:43.03ms
+[2025-09-05 16:44:25] [Rank 0] step:3341/10000 train_time:143567ms step_avg:42.97ms
+[2025-09-05 16:44:25] [Rank 0] step:3361/10000 train_time:144245ms step_avg:42.92ms
+[2025-09-05 16:44:26] [Rank 0] step:3381/10000 train_time:144903ms step_avg:42.86ms
+[2025-09-05 16:44:27] [Rank 0] step:3401/10000 train_time:145560ms step_avg:42.80ms
+[2025-09-05 16:44:27] [Rank 0] step:3421/10000 train_time:146217ms step_avg:42.74ms
+[2025-09-05 16:44:28] [Rank 0] step:3441/10000 train_time:146874ms step_avg:42.68ms
+[2025-09-05 16:44:29] [Rank 0] step:3461/10000 train_time:147530ms step_avg:42.63ms
+[2025-09-05 16:44:29] [Rank 0] step:3481/10000 train_time:148187ms step_avg:42.57ms
+[2025-09-05 16:44:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:44:30] [Rank 0] PRINT: step:3500/10000 train_loss:0.8188 val_loss:0.7959 train_time:149078ms step_avg:42.59ms
+[2025-09-05 16:44:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:44:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:45:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:45:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:45:51] [Rank 0] Total Loss: 4.8964
+[2025-09-05 16:45:51] [Rank 0] Total FTA (Unweighted): 0.7162
+[2025-09-05 16:45:51] [Rank 0] Total FTA (Weighted): 0.7163
+[2025-09-05 16:45:51] [Rank 0] Group 0 Loss: 4.6237
+[2025-09-05 16:45:51] [Rank 0] Group 1 Loss: 4.6654
+[2025-09-05 16:45:51] [Rank 0] Group 2 Loss: 4.1643
+[2025-09-05 16:45:51] [Rank 0] Group 3 Loss: 4.6191
+[2025-09-05 16:45:51] [Rank 0] Group 4 Loss: 4.6413
+[2025-09-05 16:45:51] [Rank 0] Group 5 Loss: 4.6925
+[2025-09-05 16:45:51] [Rank 0] Group 6 Loss: 4.6251
+[2025-09-05 16:45:51] [Rank 0] Group 7 Loss: 4.6969
+[2025-09-05 16:45:51] [Rank 0] Group 8 Loss: 4.9104
+[2025-09-05 16:45:51] [Rank 0] Group 9 Loss: 4.8098
+[2025-09-05 16:45:51] [Rank 0] Group 10 Loss: 4.9874
+[2025-09-05 16:45:51] [Rank 0] Group 11 Loss: 5.1071
+[2025-09-05 16:45:51] [Rank 0] Group 12 Loss: 5.2549
+[2025-09-05 16:45:51] [Rank 0] Group 13 Loss: 5.4461
+[2025-09-05 16:45:51] [Rank 0] Group 14 Loss: 5.4744
+[2025-09-05 16:45:51] [Rank 0] Group 15 Loss: 5.6237
+[2025-09-05 16:45:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:45:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:45:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:45:51] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:45:51] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:45:51] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:45:51] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:45:51] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:45:51] [Rank 0] Group 8 FTA: 0.9000
+[2025-09-05 16:45:51] [Rank 0] Group 9 FTA: 0.7800
+[2025-09-05 16:45:51] [Rank 0] Group 10 FTA: 0.7600
+[2025-09-05 16:45:51] [Rank 0] Group 11 FTA: 0.5000
+[2025-09-05 16:45:51] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 16:45:51] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 16:45:51] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 16:45:51] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 16:45:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:45:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:45:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:45:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:45:52] [Rank 0] step:3501/10000 train_time:149087ms step_avg:42.58ms
+[2025-09-05 16:45:53] [Rank 0] step:3521/10000 train_time:149531ms step_avg:42.47ms
+[2025-09-05 16:45:54] [Rank 0] step:3541/10000 train_time:150188ms step_avg:42.41ms
+[2025-09-05 16:45:54] [Rank 0] step:3561/10000 train_time:150845ms step_avg:42.36ms
+[2025-09-05 16:45:55] [Rank 0] step:3581/10000 train_time:151502ms step_avg:42.31ms
+[2025-09-05 16:45:56] [Rank 0] step:3601/10000 train_time:152158ms step_avg:42.25ms
+[2025-09-05 16:45:56] [Rank 0] step:3621/10000 train_time:152814ms step_avg:42.20ms
+[2025-09-05 16:45:57] [Rank 0] step:3641/10000 train_time:153546ms step_avg:42.17ms
+[2025-09-05 16:45:58] [Rank 0] step:3661/10000 train_time:154202ms step_avg:42.12ms
+[2025-09-05 16:45:58] [Rank 0] step:3681/10000 train_time:154858ms step_avg:42.07ms
+[2025-09-05 16:45:59] [Rank 0] step:3701/10000 train_time:155515ms step_avg:42.02ms
+[2025-09-05 16:46:00] [Rank 0] step:3721/10000 train_time:156171ms step_avg:41.97ms
+[2025-09-05 16:46:00] [Rank 0] step:3741/10000 train_time:156828ms step_avg:41.92ms
+[2025-09-05 16:46:01] [Rank 0] step:3761/10000 train_time:157484ms step_avg:41.87ms
+[2025-09-05 16:46:02] [Rank 0] step:3781/10000 train_time:158140ms step_avg:41.82ms
+[2025-09-05 16:46:02] [Rank 0] step:3801/10000 train_time:158796ms step_avg:41.78ms
+[2025-09-05 16:46:03] [Rank 0] step:3821/10000 train_time:159453ms step_avg:41.73ms
+[2025-09-05 16:46:04] [Rank 0] step:3841/10000 train_time:160109ms step_avg:41.68ms
+[2025-09-05 16:46:05] [Rank 0] step:3861/10000 train_time:160909ms step_avg:41.68ms
+[2025-09-05 16:46:05] [Rank 0] step:3881/10000 train_time:161566ms step_avg:41.63ms
+[2025-09-05 16:46:06] [Rank 0] step:3901/10000 train_time:162223ms step_avg:41.58ms
+[2025-09-05 16:46:07] [Rank 0] step:3921/10000 train_time:163008ms step_avg:41.57ms
+[2025-09-05 16:46:07] [Rank 0] step:3941/10000 train_time:163665ms step_avg:41.53ms
+[2025-09-05 16:46:08] [Rank 0] step:3961/10000 train_time:164321ms step_avg:41.48ms
+[2025-09-05 16:46:09] [Rank 0] step:3981/10000 train_time:164977ms step_avg:41.44ms
+[2025-09-05 16:46:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:46:10] [Rank 0] PRINT: step:4000/10000 train_loss:0.7907 val_loss:0.7707 train_time:165868ms step_avg:41.47ms
+[2025-09-05 16:46:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:46:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:47:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:47:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:47:31] [Rank 0] Total Loss: 4.7499
+[2025-09-05 16:47:31] [Rank 0] Total FTA (Unweighted): 0.7438
+[2025-09-05 16:47:31] [Rank 0] Total FTA (Weighted): 0.7438
+[2025-09-05 16:47:31] [Rank 0] Group 0 Loss: 4.3897
+[2025-09-05 16:47:31] [Rank 0] Group 1 Loss: 4.4205
+[2025-09-05 16:47:31] [Rank 0] Group 2 Loss: 4.1710
+[2025-09-05 16:47:31] [Rank 0] Group 3 Loss: 4.5499
+[2025-09-05 16:47:31] [Rank 0] Group 4 Loss: 4.5754
+[2025-09-05 16:47:31] [Rank 0] Group 5 Loss: 4.5330
+[2025-09-05 16:47:31] [Rank 0] Group 6 Loss: 4.4992
+[2025-09-05 16:47:31] [Rank 0] Group 7 Loss: 4.6011
+[2025-09-05 16:47:31] [Rank 0] Group 8 Loss: 4.7375
+[2025-09-05 16:47:31] [Rank 0] Group 9 Loss: 4.6999
+[2025-09-05 16:47:31] [Rank 0] Group 10 Loss: 4.8380
+[2025-09-05 16:47:31] [Rank 0] Group 11 Loss: 4.9574
+[2025-09-05 16:47:31] [Rank 0] Group 12 Loss: 5.0824
+[2025-09-05 16:47:31] [Rank 0] Group 13 Loss: 5.2418
+[2025-09-05 16:47:31] [Rank 0] Group 14 Loss: 5.2758
+[2025-09-05 16:47:31] [Rank 0] Group 15 Loss: 5.4263
+[2025-09-05 16:47:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:47:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:47:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:47:31] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:47:31] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:47:31] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:47:31] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:47:31] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:47:31] [Rank 0] Group 8 FTA: 0.9400
+[2025-09-05 16:47:31] [Rank 0] Group 9 FTA: 0.8100
+[2025-09-05 16:47:31] [Rank 0] Group 10 FTA: 0.8500
+[2025-09-05 16:47:31] [Rank 0] Group 11 FTA: 0.6200
+[2025-09-05 16:47:31] [Rank 0] Group 12 FTA: 0.2600
+[2025-09-05 16:47:31] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 16:47:31] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 16:47:31] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:47:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:47:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:47:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:47:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:47:33] [Rank 0] step:4001/10000 train_time:165876ms step_avg:41.46ms
+[2025-09-05 16:47:33] [Rank 0] step:4021/10000 train_time:166321ms step_avg:41.36ms
+[2025-09-05 16:47:34] [Rank 0] step:4041/10000 train_time:166978ms step_avg:41.32ms
+[2025-09-05 16:47:35] [Rank 0] step:4061/10000 train_time:167634ms step_avg:41.28ms
+[2025-09-05 16:47:35] [Rank 0] step:4081/10000 train_time:168292ms step_avg:41.24ms
+[2025-09-05 16:47:36] [Rank 0] step:4101/10000 train_time:168948ms step_avg:41.20ms
+[2025-09-05 16:47:37] [Rank 0] step:4121/10000 train_time:169606ms step_avg:41.16ms
+[2025-09-05 16:47:37] [Rank 0] step:4141/10000 train_time:170263ms step_avg:41.12ms
+[2025-09-05 16:47:38] [Rank 0] step:4161/10000 train_time:170920ms step_avg:41.08ms
+[2025-09-05 16:47:39] [Rank 0] step:4181/10000 train_time:171578ms step_avg:41.04ms
+[2025-09-05 16:47:39] [Rank 0] step:4201/10000 train_time:172235ms step_avg:41.00ms
+[2025-09-05 16:47:40] [Rank 0] step:4221/10000 train_time:172893ms step_avg:40.96ms
+[2025-09-05 16:47:40] [Rank 0] step:4241/10000 train_time:173550ms step_avg:40.92ms
+[2025-09-05 16:47:41] [Rank 0] step:4261/10000 train_time:174208ms step_avg:40.88ms
+[2025-09-05 16:47:42] [Rank 0] step:4281/10000 train_time:174864ms step_avg:40.85ms
+[2025-09-05 16:47:42] [Rank 0] step:4301/10000 train_time:175521ms step_avg:40.81ms
+[2025-09-05 16:47:43] [Rank 0] step:4321/10000 train_time:176178ms step_avg:40.77ms
+[2025-09-05 16:47:44] [Rank 0] step:4341/10000 train_time:176834ms step_avg:40.74ms
+[2025-09-05 16:47:44] [Rank 0] step:4361/10000 train_time:177491ms step_avg:40.70ms
+[2025-09-05 16:47:45] [Rank 0] step:4381/10000 train_time:178148ms step_avg:40.66ms
+[2025-09-05 16:47:46] [Rank 0] step:4401/10000 train_time:178807ms step_avg:40.63ms
+[2025-09-05 16:47:46] [Rank 0] step:4421/10000 train_time:179464ms step_avg:40.59ms
+[2025-09-05 16:47:47] [Rank 0] step:4441/10000 train_time:180121ms step_avg:40.56ms
+[2025-09-05 16:47:48] [Rank 0] step:4461/10000 train_time:180779ms step_avg:40.52ms
+[2025-09-05 16:47:48] [Rank 0] step:4481/10000 train_time:181436ms step_avg:40.49ms
+[2025-09-05 16:47:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:47:49] [Rank 0] PRINT: step:4500/10000 train_loss:0.7683 val_loss:0.7496 train_time:182328ms step_avg:40.52ms
+[2025-09-05 16:47:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:47:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:49:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:49:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:49:10] [Rank 0] Total Loss: 4.8510
+[2025-09-05 16:49:10] [Rank 0] Total FTA (Unweighted): 0.7663
+[2025-09-05 16:49:10] [Rank 0] Total FTA (Weighted): 0.7662
+[2025-09-05 16:49:10] [Rank 0] Group 0 Loss: 4.5876
+[2025-09-05 16:49:10] [Rank 0] Group 1 Loss: 4.5245
+[2025-09-05 16:49:10] [Rank 0] Group 2 Loss: 4.2888
+[2025-09-05 16:49:10] [Rank 0] Group 3 Loss: 4.5608
+[2025-09-05 16:49:10] [Rank 0] Group 4 Loss: 4.7158
+[2025-09-05 16:49:10] [Rank 0] Group 5 Loss: 4.7745
+[2025-09-05 16:49:10] [Rank 0] Group 6 Loss: 4.6810
+[2025-09-05 16:49:10] [Rank 0] Group 7 Loss: 4.7057
+[2025-09-05 16:49:10] [Rank 0] Group 8 Loss: 4.8820
+[2025-09-05 16:49:10] [Rank 0] Group 9 Loss: 4.7644
+[2025-09-05 16:49:10] [Rank 0] Group 10 Loss: 4.9863
+[2025-09-05 16:49:10] [Rank 0] Group 11 Loss: 5.0378
+[2025-09-05 16:49:10] [Rank 0] Group 12 Loss: 5.0953
+[2025-09-05 16:49:10] [Rank 0] Group 13 Loss: 5.3145
+[2025-09-05 16:49:10] [Rank 0] Group 14 Loss: 5.2770
+[2025-09-05 16:49:10] [Rank 0] Group 15 Loss: 5.4206
+[2025-09-05 16:49:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:49:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:49:10] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:49:10] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:49:10] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:49:10] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:49:10] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:49:10] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:49:10] [Rank 0] Group 8 FTA: 0.9600
+[2025-09-05 16:49:10] [Rank 0] Group 9 FTA: 0.8400
+[2025-09-05 16:49:10] [Rank 0] Group 10 FTA: 0.8900
+[2025-09-05 16:49:10] [Rank 0] Group 11 FTA: 0.7400
+[2025-09-05 16:49:10] [Rank 0] Group 12 FTA: 0.4000
+[2025-09-05 16:49:10] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 16:49:10] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 16:49:10] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:49:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:49:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:49:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:49:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:49:12] [Rank 0] step:4501/10000 train_time:182336ms step_avg:40.51ms
+[2025-09-05 16:49:12] [Rank 0] step:4521/10000 train_time:182768ms step_avg:40.43ms
+[2025-09-05 16:49:13] [Rank 0] step:4541/10000 train_time:183425ms step_avg:40.39ms
+[2025-09-05 16:49:14] [Rank 0] step:4561/10000 train_time:184235ms step_avg:40.39ms
+[2025-09-05 16:49:14] [Rank 0] step:4581/10000 train_time:184891ms step_avg:40.36ms
+[2025-09-05 16:49:15] [Rank 0] step:4601/10000 train_time:185547ms step_avg:40.33ms
+[2025-09-05 16:49:16] [Rank 0] step:4621/10000 train_time:186203ms step_avg:40.29ms
+[2025-09-05 16:49:17] [Rank 0] step:4641/10000 train_time:187063ms step_avg:40.31ms
+[2025-09-05 16:49:17] [Rank 0] step:4661/10000 train_time:187720ms step_avg:40.27ms
+[2025-09-05 16:49:18] [Rank 0] step:4681/10000 train_time:188376ms step_avg:40.24ms
+[2025-09-05 16:49:19] [Rank 0] step:4701/10000 train_time:189032ms step_avg:40.21ms
+[2025-09-05 16:49:19] [Rank 0] step:4721/10000 train_time:189689ms step_avg:40.18ms
+[2025-09-05 16:49:20] [Rank 0] step:4741/10000 train_time:190346ms step_avg:40.15ms
+[2025-09-05 16:49:21] [Rank 0] step:4761/10000 train_time:191002ms step_avg:40.12ms
+[2025-09-05 16:49:21] [Rank 0] step:4781/10000 train_time:191658ms step_avg:40.09ms
+[2025-09-05 16:49:22] [Rank 0] step:4801/10000 train_time:192314ms step_avg:40.06ms
+[2025-09-05 16:49:22] [Rank 0] step:4821/10000 train_time:192971ms step_avg:40.03ms
+[2025-09-05 16:49:23] [Rank 0] step:4841/10000 train_time:193936ms step_avg:40.06ms
+[2025-09-05 16:49:24] [Rank 0] step:4861/10000 train_time:194593ms step_avg:40.03ms
+[2025-09-05 16:49:25] [Rank 0] step:4881/10000 train_time:195249ms step_avg:40.00ms
+[2025-09-05 16:49:25] [Rank 0] step:4901/10000 train_time:195905ms step_avg:39.97ms
+[2025-09-05 16:49:26] [Rank 0] step:4921/10000 train_time:196562ms step_avg:39.94ms
+[2025-09-05 16:49:27] [Rank 0] step:4941/10000 train_time:197218ms step_avg:39.91ms
+[2025-09-05 16:49:27] [Rank 0] step:4961/10000 train_time:197875ms step_avg:39.89ms
+[2025-09-05 16:49:28] [Rank 0] step:4981/10000 train_time:198532ms step_avg:39.86ms
+[2025-09-05 16:49:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:49:29] [Rank 0] PRINT: step:5000/10000 train_loss:0.7496 val_loss:0.7340 train_time:199422ms step_avg:39.88ms
+[2025-09-05 16:49:29] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:49:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:50:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:50:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:50:50] [Rank 0] Total Loss: 4.9660
+[2025-09-05 16:50:50] [Rank 0] Total FTA (Unweighted): 0.7775
+[2025-09-05 16:50:50] [Rank 0] Total FTA (Weighted): 0.7775
+[2025-09-05 16:50:50] [Rank 0] Group 0 Loss: 4.9072
+[2025-09-05 16:50:50] [Rank 0] Group 1 Loss: 4.6200
+[2025-09-05 16:50:50] [Rank 0] Group 2 Loss: 4.3664
+[2025-09-05 16:50:50] [Rank 0] Group 3 Loss: 4.7555
+[2025-09-05 16:50:50] [Rank 0] Group 4 Loss: 4.7992
+[2025-09-05 16:50:50] [Rank 0] Group 5 Loss: 4.8419
+[2025-09-05 16:50:50] [Rank 0] Group 6 Loss: 4.8012
+[2025-09-05 16:50:50] [Rank 0] Group 7 Loss: 4.8878
+[2025-09-05 16:50:50] [Rank 0] Group 8 Loss: 4.9879
+[2025-09-05 16:50:50] [Rank 0] Group 9 Loss: 4.9006
+[2025-09-05 16:50:50] [Rank 0] Group 10 Loss: 5.1114
+[2025-09-05 16:50:50] [Rank 0] Group 11 Loss: 5.1358
+[2025-09-05 16:50:50] [Rank 0] Group 12 Loss: 5.1954
+[2025-09-05 16:50:50] [Rank 0] Group 13 Loss: 5.3185
+[2025-09-05 16:50:50] [Rank 0] Group 14 Loss: 5.3516
+[2025-09-05 16:50:50] [Rank 0] Group 15 Loss: 5.4761
+[2025-09-05 16:50:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:50:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:50:50] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:50:50] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:50:50] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:50:50] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:50:50] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:50:50] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:50:50] [Rank 0] Group 8 FTA: 0.9800
+[2025-09-05 16:50:50] [Rank 0] Group 9 FTA: 0.8900
+[2025-09-05 16:50:50] [Rank 0] Group 10 FTA: 0.9300
+[2025-09-05 16:50:50] [Rank 0] Group 11 FTA: 0.8000
+[2025-09-05 16:50:50] [Rank 0] Group 12 FTA: 0.4100
+[2025-09-05 16:50:50] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 16:50:50] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:50:50] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:50:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:50:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:50:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:50:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:50:51] [Rank 0] step:5001/10000 train_time:199430ms step_avg:39.88ms
+[2025-09-05 16:50:52] [Rank 0] step:5021/10000 train_time:199869ms step_avg:39.81ms
+[2025-09-05 16:50:53] [Rank 0] step:5041/10000 train_time:200526ms step_avg:39.78ms
+[2025-09-05 16:50:53] [Rank 0] step:5061/10000 train_time:201183ms step_avg:39.75ms
+[2025-09-05 16:50:54] [Rank 0] step:5081/10000 train_time:201840ms step_avg:39.72ms
+[2025-09-05 16:50:55] [Rank 0] step:5101/10000 train_time:202497ms step_avg:39.70ms
+[2025-09-05 16:50:55] [Rank 0] step:5121/10000 train_time:203155ms step_avg:39.67ms
+[2025-09-05 16:50:56] [Rank 0] step:5141/10000 train_time:203812ms step_avg:39.64ms
+[2025-09-05 16:50:57] [Rank 0] step:5161/10000 train_time:204469ms step_avg:39.62ms
+[2025-09-05 16:50:57] [Rank 0] step:5181/10000 train_time:205126ms step_avg:39.59ms
+[2025-09-05 16:50:58] [Rank 0] step:5201/10000 train_time:205783ms step_avg:39.57ms
+[2025-09-05 16:50:59] [Rank 0] step:5221/10000 train_time:206441ms step_avg:39.54ms
+[2025-09-05 16:50:59] [Rank 0] step:5241/10000 train_time:207099ms step_avg:39.52ms
+[2025-09-05 16:51:00] [Rank 0] step:5261/10000 train_time:207756ms step_avg:39.49ms
+[2025-09-05 16:51:01] [Rank 0] step:5281/10000 train_time:208413ms step_avg:39.46ms
+[2025-09-05 16:51:01] [Rank 0] step:5301/10000 train_time:209070ms step_avg:39.44ms
+[2025-09-05 16:51:02] [Rank 0] step:5321/10000 train_time:209728ms step_avg:39.42ms
+[2025-09-05 16:51:03] [Rank 0] step:5341/10000 train_time:210385ms step_avg:39.39ms
+[2025-09-05 16:51:03] [Rank 0] step:5361/10000 train_time:211042ms step_avg:39.37ms
+[2025-09-05 16:51:04] [Rank 0] step:5381/10000 train_time:211700ms step_avg:39.34ms
+[2025-09-05 16:51:05] [Rank 0] step:5401/10000 train_time:212357ms step_avg:39.32ms
+[2025-09-05 16:51:05] [Rank 0] step:5421/10000 train_time:213014ms step_avg:39.29ms
+[2025-09-05 16:51:06] [Rank 0] step:5441/10000 train_time:213672ms step_avg:39.27ms
+[2025-09-05 16:51:07] [Rank 0] step:5461/10000 train_time:214329ms step_avg:39.25ms
+[2025-09-05 16:51:07] [Rank 0] step:5481/10000 train_time:214986ms step_avg:39.22ms
+[2025-09-05 16:51:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
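The step_avg field on each step line is just cumulative train_time divided by the completed step count, and any line in the log confirms it; a one-line sanity check using the step 5500 values below (the helper name is ours):

```python
# step_avg is cumulative wall time over completed steps.
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

# Values from the "step:5500/10000 ... train_time:215877ms" line below.
print(round(step_avg_ms(215_877, 5500), 2))  # 39.25, matching the logged step_avg
```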
+[2025-09-05 16:51:08] [Rank 0] PRINT: step:5500/10000 train_loss:0.7349 val_loss:0.7218 train_time:215877ms step_avg:39.25ms
+[2025-09-05 16:51:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:51:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:52:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:52:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:52:29] [Rank 0] Total Loss: 4.9600
+[2025-09-05 16:52:29] [Rank 0] Total FTA (Unweighted): 0.7969
+[2025-09-05 16:52:29] [Rank 0] Total FTA (Weighted): 0.7969
+[2025-09-05 16:52:29] [Rank 0] Group 0 Loss: 4.6918
+[2025-09-05 16:52:29] [Rank 0] Group 1 Loss: 4.6593
+[2025-09-05 16:52:29] [Rank 0] Group 2 Loss: 4.4357
+[2025-09-05 16:52:29] [Rank 0] Group 3 Loss: 4.8336
+[2025-09-05 16:52:29] [Rank 0] Group 4 Loss: 4.7880
+[2025-09-05 16:52:29] [Rank 0] Group 5 Loss: 4.8781
+[2025-09-05 16:52:29] [Rank 0] Group 6 Loss: 4.8215
+[2025-09-05 16:52:29] [Rank 0] Group 7 Loss: 4.8617
+[2025-09-05 16:52:29] [Rank 0] Group 8 Loss: 5.0164
+[2025-09-05 16:52:29] [Rank 0] Group 9 Loss: 4.9528
+[2025-09-05 16:52:29] [Rank 0] Group 10 Loss: 5.1145
+[2025-09-05 16:52:29] [Rank 0] Group 11 Loss: 5.1282
+[2025-09-05 16:52:29] [Rank 0] Group 12 Loss: 5.1630
+[2025-09-05 16:52:29] [Rank 0] Group 13 Loss: 5.2978
+[2025-09-05 16:52:29] [Rank 0] Group 14 Loss: 5.2889
+[2025-09-05 16:52:29] [Rank 0] Group 15 Loss: 5.4283
+[2025-09-05 16:52:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:52:29] [Rank 0] Group 9 FTA: 0.9100
+[2025-09-05 16:52:29] [Rank 0] Group 10 FTA: 0.9500
+[2025-09-05 16:52:29] [Rank 0] Group 11 FTA: 0.8400
+[2025-09-05 16:52:29] [Rank 0] Group 12 FTA: 0.5700
+[2025-09-05 16:52:29] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 16:52:29] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 16:52:29] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:52:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:52:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:52:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:52:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:52:30] [Rank 0] step:5501/10000 train_time:215885ms step_avg:39.24ms
+[2025-09-05 16:52:31] [Rank 0] step:5521/10000 train_time:216332ms step_avg:39.18ms
+[2025-09-05 16:52:32] [Rank 0] step:5541/10000 train_time:216987ms step_avg:39.16ms
+[2025-09-05 16:52:32] [Rank 0] step:5561/10000 train_time:217644ms step_avg:39.14ms
+[2025-09-05 16:52:33] [Rank 0] step:5581/10000 train_time:218299ms step_avg:39.11ms
+[2025-09-05 16:52:34] [Rank 0] step:5601/10000 train_time:218955ms step_avg:39.09ms
+[2025-09-05 16:52:34] [Rank 0] step:5621/10000 train_time:219610ms step_avg:39.07ms
+[2025-09-05 16:52:36] [Rank 0] step:5641/10000 train_time:220799ms step_avg:39.14ms
+[2025-09-05 16:52:36] [Rank 0] step:5661/10000 train_time:221375ms step_avg:39.11ms
+[2025-09-05 16:52:37] [Rank 0] step:5681/10000 train_time:222030ms step_avg:39.08ms
+[2025-09-05 16:52:38] [Rank 0] step:5701/10000 train_time:222686ms step_avg:39.06ms
+[2025-09-05 16:52:38] [Rank 0] step:5721/10000 train_time:223343ms step_avg:39.04ms
+[2025-09-05 16:52:39] [Rank 0] step:5741/10000 train_time:223999ms step_avg:39.02ms
+[2025-09-05 16:52:39] [Rank 0] step:5761/10000 train_time:224654ms step_avg:39.00ms
+[2025-09-05 16:52:40] [Rank 0] step:5781/10000 train_time:225309ms step_avg:38.97ms
+[2025-09-05 16:52:41] [Rank 0] step:5801/10000 train_time:225965ms step_avg:38.95ms
+[2025-09-05 16:52:41] [Rank 0] step:5821/10000 train_time:226621ms step_avg:38.93ms
+[2025-09-05 16:52:42] [Rank 0] step:5841/10000 train_time:227277ms step_avg:38.91ms
+[2025-09-05 16:52:43] [Rank 0] step:5861/10000 train_time:227933ms step_avg:38.89ms
+[2025-09-05 16:52:43] [Rank 0] step:5881/10000 train_time:228589ms step_avg:38.87ms
+[2025-09-05 16:52:44] [Rank 0] step:5901/10000 train_time:229246ms step_avg:38.85ms
+[2025-09-05 16:52:45] [Rank 0] step:5921/10000 train_time:229901ms step_avg:38.83ms
+[2025-09-05 16:52:45] [Rank 0] step:5941/10000 train_time:230557ms step_avg:38.81ms
+[2025-09-05 16:52:46] [Rank 0] step:5961/10000 train_time:231213ms step_avg:38.79ms
+[2025-09-05 16:52:47] [Rank 0] step:5981/10000 train_time:231869ms step_avg:38.77ms
+[2025-09-05 16:52:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
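"Total FTA (Unweighted)" and "Total FTA (Weighted)" agree to four decimals throughout this run, which is what you would expect if the 1600-sample fixed eval set is split evenly into 16 groups of 100: the sample-weighted mean then equals the plain mean of per-group FTAs. A sketch with the step 6000 values below; note the lone 0.8463 vs 0.8462 split at step 7500 sits exactly on the 0.84625 rounding boundary, so it is consistent with equal weights too:

```python
# Per-group FTA at step 6000 (groups 0-15), copied from the log below.
group_fta = [1.0] * 9 + [0.97, 0.96, 0.88, 0.65, 0.31, 0.17, 0.09]
counts = [100] * 16  # assumed: 1600 fixed-eval samples split evenly

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * c for f, c in zip(group_fta, counts)) / sum(counts)
print(round(unweighted, 4), round(weighted, 4))  # 0.8144 0.8144
```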
+[2025-09-05 16:52:48] [Rank 0] PRINT: step:6000/10000 train_loss:0.7231 val_loss:0.7113 train_time:232758ms step_avg:38.79ms
+[2025-09-05 16:52:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:52:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:54:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:54:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:54:08] [Rank 0] Total Loss: 4.9267
+[2025-09-05 16:54:08] [Rank 0] Total FTA (Unweighted): 0.8144
+[2025-09-05 16:54:08] [Rank 0] Total FTA (Weighted): 0.8144
+[2025-09-05 16:54:08] [Rank 0] Group 0 Loss: 4.6712
+[2025-09-05 16:54:08] [Rank 0] Group 1 Loss: 4.6850
+[2025-09-05 16:54:08] [Rank 0] Group 2 Loss: 4.4944
+[2025-09-05 16:54:08] [Rank 0] Group 3 Loss: 4.7683
+[2025-09-05 16:54:08] [Rank 0] Group 4 Loss: 4.7184
+[2025-09-05 16:54:08] [Rank 0] Group 5 Loss: 4.8079
+[2025-09-05 16:54:08] [Rank 0] Group 6 Loss: 4.7894
+[2025-09-05 16:54:08] [Rank 0] Group 7 Loss: 4.8680
+[2025-09-05 16:54:08] [Rank 0] Group 8 Loss: 4.9648
+[2025-09-05 16:54:08] [Rank 0] Group 9 Loss: 4.9057
+[2025-09-05 16:54:08] [Rank 0] Group 10 Loss: 5.0286
+[2025-09-05 16:54:08] [Rank 0] Group 11 Loss: 5.0510
+[2025-09-05 16:54:08] [Rank 0] Group 12 Loss: 5.0989
+[2025-09-05 16:54:08] [Rank 0] Group 13 Loss: 5.2940
+[2025-09-05 16:54:08] [Rank 0] Group 14 Loss: 5.2821
+[2025-09-05 16:54:08] [Rank 0] Group 15 Loss: 5.4002
+[2025-09-05 16:54:08] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:54:08] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:54:08] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:54:08] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:54:08] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:54:08] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:54:08] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:54:08] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:54:08] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:54:09] [Rank 0] Group 9 FTA: 0.9700
+[2025-09-05 16:54:09] [Rank 0] Group 10 FTA: 0.9600
+[2025-09-05 16:54:09] [Rank 0] Group 11 FTA: 0.8800
+[2025-09-05 16:54:09] [Rank 0] Group 12 FTA: 0.6500
+[2025-09-05 16:54:09] [Rank 0] Group 13 FTA: 0.3100
+[2025-09-05 16:54:09] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 16:54:09] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:54:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:54:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:54:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:54:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:54:10] [Rank 0] step:6001/10000 train_time:232766ms step_avg:38.79ms
+[2025-09-05 16:54:11] [Rank 0] step:6021/10000 train_time:233253ms step_avg:38.74ms
+[2025-09-05 16:54:11] [Rank 0] step:6041/10000 train_time:233909ms step_avg:38.72ms
+[2025-09-05 16:54:12] [Rank 0] step:6061/10000 train_time:234567ms step_avg:38.70ms
+[2025-09-05 16:54:13] [Rank 0] step:6081/10000 train_time:235224ms step_avg:38.68ms
+[2025-09-05 16:54:13] [Rank 0] step:6101/10000 train_time:235881ms step_avg:38.66ms
+[2025-09-05 16:54:14] [Rank 0] step:6121/10000 train_time:236538ms step_avg:38.64ms
+[2025-09-05 16:54:15] [Rank 0] step:6141/10000 train_time:237195ms step_avg:38.62ms
+[2025-09-05 16:54:15] [Rank 0] step:6161/10000 train_time:237852ms step_avg:38.61ms
+[2025-09-05 16:54:16] [Rank 0] step:6181/10000 train_time:238509ms step_avg:38.59ms
+[2025-09-05 16:54:17] [Rank 0] step:6201/10000 train_time:239165ms step_avg:38.57ms
+[2025-09-05 16:54:17] [Rank 0] step:6221/10000 train_time:239821ms step_avg:38.55ms
+[2025-09-05 16:54:18] [Rank 0] step:6241/10000 train_time:240478ms step_avg:38.53ms
+[2025-09-05 16:54:19] [Rank 0] step:6261/10000 train_time:241134ms step_avg:38.51ms
+[2025-09-05 16:54:19] [Rank 0] step:6281/10000 train_time:241791ms step_avg:38.50ms
+[2025-09-05 16:54:20] [Rank 0] step:6301/10000 train_time:242447ms step_avg:38.48ms
+[2025-09-05 16:54:20] [Rank 0] step:6321/10000 train_time:243104ms step_avg:38.46ms
+[2025-09-05 16:54:21] [Rank 0] step:6341/10000 train_time:243760ms step_avg:38.44ms
+[2025-09-05 16:54:22] [Rank 0] step:6361/10000 train_time:244417ms step_avg:38.42ms
+[2025-09-05 16:54:22] [Rank 0] step:6381/10000 train_time:245073ms step_avg:38.41ms
+[2025-09-05 16:54:23] [Rank 0] step:6401/10000 train_time:245730ms step_avg:38.39ms
+[2025-09-05 16:54:24] [Rank 0] step:6421/10000 train_time:246387ms step_avg:38.37ms
+[2025-09-05 16:54:24] [Rank 0] step:6441/10000 train_time:247044ms step_avg:38.35ms
+[2025-09-05 16:54:25] [Rank 0] step:6461/10000 train_time:247701ms step_avg:38.34ms
+[2025-09-05 16:54:26] [Rank 0] step:6481/10000 train_time:248357ms step_avg:38.32ms
+[2025-09-05 16:54:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
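Every evaluation block in this log follows the same "Group N Loss/FTA: value" pattern, so the per-group trajectories can be recovered with a single regex pass. A minimal sketch; the file name is a placeholder, and any consecutive duplicate lines in a raw log simply repeat values:

```python
import re
from collections import defaultdict

# Minimal parser for the "Group N Loss: X" / "Group N FTA: X" lines above.
pattern = re.compile(r"Group (\d+) (Loss|FTA): ([0-9.]+)")

series = defaultdict(list)  # (group, metric) -> values in file order
with open("training_log.txt") as fh:  # placeholder path
    for line in fh:
        m = pattern.search(line)
        if m:
            group, metric, value = int(m.group(1)), m.group(2), float(m.group(3))
            series[(group, metric)].append(value)

# e.g. series[(12, "FTA")] -> [0.40, 0.41, 0.57, 0.65, 0.77, ...] per eval step
```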
+[2025-09-05 16:54:27] [Rank 0] PRINT: step:6500/10000 train_loss:0.7131 val_loss:0.7023 train_time:249248ms step_avg:38.35ms
+[2025-09-05 16:54:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:54:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:55:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:55:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:55:48] [Rank 0] Total Loss: 5.0034
+[2025-09-05 16:55:48] [Rank 0] Total FTA (Unweighted): 0.8237
+[2025-09-05 16:55:48] [Rank 0] Total FTA (Weighted): 0.8237
+[2025-09-05 16:55:48] [Rank 0] Group 0 Loss: 4.8357
+[2025-09-05 16:55:48] [Rank 0] Group 1 Loss: 4.6882
+[2025-09-05 16:55:48] [Rank 0] Group 2 Loss: 4.5237
+[2025-09-05 16:55:48] [Rank 0] Group 3 Loss: 4.8201
+[2025-09-05 16:55:48] [Rank 0] Group 4 Loss: 4.8301
+[2025-09-05 16:55:48] [Rank 0] Group 5 Loss: 4.9230
+[2025-09-05 16:55:48] [Rank 0] Group 6 Loss: 4.8954
+[2025-09-05 16:55:48] [Rank 0] Group 7 Loss: 4.9062
+[2025-09-05 16:55:48] [Rank 0] Group 8 Loss: 5.0819
+[2025-09-05 16:55:48] [Rank 0] Group 9 Loss: 4.9725
+[2025-09-05 16:55:48] [Rank 0] Group 10 Loss: 5.1403
+[2025-09-05 16:55:48] [Rank 0] Group 11 Loss: 5.1410
+[2025-09-05 16:55:48] [Rank 0] Group 12 Loss: 5.1927
+[2025-09-05 16:55:48] [Rank 0] Group 13 Loss: 5.3412
+[2025-09-05 16:55:48] [Rank 0] Group 14 Loss: 5.3102
+[2025-09-05 16:55:48] [Rank 0] Group 15 Loss: 5.4522
+[2025-09-05 16:55:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:55:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:55:48] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:55:48] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:55:48] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:55:48] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:55:48] [Rank 0] Group 6 FTA: 0.9900
+[2025-09-05 16:55:48] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:55:48] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:55:48] [Rank 0] Group 9 FTA: 0.9800
+[2025-09-05 16:55:48] [Rank 0] Group 10 FTA: 0.9700
+[2025-09-05 16:55:48] [Rank 0] Group 11 FTA: 0.9000
+[2025-09-05 16:55:48] [Rank 0] Group 12 FTA: 0.7700
+[2025-09-05 16:55:48] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-05 16:55:48] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 16:55:48] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 16:55:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:55:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:55:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:55:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:55:50] [Rank 0] step:6501/10000 train_time:249256ms step_avg:38.34ms
+[2025-09-05 16:55:51] [Rank 0] step:6521/10000 train_time:249705ms step_avg:38.29ms
+[2025-09-05 16:55:51] [Rank 0] step:6541/10000 train_time:250359ms step_avg:38.28ms
+[2025-09-05 16:55:52] [Rank 0] step:6561/10000 train_time:251016ms step_avg:38.26ms
+[2025-09-05 16:55:53] [Rank 0] step:6581/10000 train_time:251671ms step_avg:38.24ms
+[2025-09-05 16:55:53] [Rank 0] step:6601/10000 train_time:252327ms step_avg:38.23ms
+[2025-09-05 16:55:54] [Rank 0] step:6621/10000 train_time:252983ms step_avg:38.21ms
+[2025-09-05 16:55:54] [Rank 0] step:6641/10000 train_time:253639ms step_avg:38.19ms
+[2025-09-05 16:55:55] [Rank 0] step:6661/10000 train_time:254295ms step_avg:38.18ms
+[2025-09-05 16:55:56] [Rank 0] step:6681/10000 train_time:254951ms step_avg:38.16ms
+[2025-09-05 16:55:56] [Rank 0] step:6701/10000 train_time:255608ms step_avg:38.14ms
+[2025-09-05 16:55:57] [Rank 0] step:6721/10000 train_time:256264ms step_avg:38.13ms
+[2025-09-05 16:55:58] [Rank 0] step:6741/10000 train_time:256920ms step_avg:38.11ms
+[2025-09-05 16:55:58] [Rank 0] step:6761/10000 train_time:257576ms step_avg:38.10ms
+[2025-09-05 16:55:59] [Rank 0] step:6781/10000 train_time:258231ms step_avg:38.08ms
+[2025-09-05 16:56:00] [Rank 0] step:6801/10000 train_time:258887ms step_avg:38.07ms
+[2025-09-05 16:56:00] [Rank 0] step:6821/10000 train_time:259543ms step_avg:38.05ms
+[2025-09-05 16:56:02] [Rank 0] step:6841/10000 train_time:260836ms step_avg:38.13ms
+[2025-09-05 16:56:02] [Rank 0] step:6861/10000 train_time:261492ms step_avg:38.11ms
+[2025-09-05 16:56:03] [Rank 0] step:6881/10000 train_time:262148ms step_avg:38.10ms
+[2025-09-05 16:56:04] [Rank 0] step:6901/10000 train_time:262804ms step_avg:38.08ms
+[2025-09-05 16:56:04] [Rank 0] step:6921/10000 train_time:263460ms step_avg:38.07ms
+[2025-09-05 16:56:05] [Rank 0] step:6941/10000 train_time:264115ms step_avg:38.05ms
+[2025-09-05 16:56:06] [Rank 0] step:6961/10000 train_time:264772ms step_avg:38.04ms
+[2025-09-05 16:56:06] [Rank 0] step:6981/10000 train_time:265429ms step_avg:38.02ms
+[2025-09-05 16:56:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
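The timestamps also show what the detailed evaluation costs in wall clock: the step 6500 pass above ran from 16:54:27 to 16:55:48, about 81 s, while 500 training steps at roughly 38 ms each take about 19 s. A rough estimate of the overhead in this stretch (all numbers read off the log above):

```python
# Rough wall-clock overhead of one detailed evaluation between checkpoints.
eval_seconds = 81          # 16:54:27 -> 16:55:48 for the step 6500 eval
steps_between_evals = 500
step_ms = 38.0             # approximate step_avg in this region of the run

train_seconds = steps_between_evals * step_ms / 1000   # ~19 s of training
overhead = eval_seconds / (eval_seconds + train_seconds)
print(f"{overhead:.0%} of wall clock spent in detailed eval")  # ~81%
```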
+[2025-09-05 16:56:07] [Rank 0] PRINT: step:7000/10000 train_loss:0.7043 val_loss:0.6945 train_time:266318ms step_avg:38.05ms
+[2025-09-05 16:56:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:56:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:57:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:57:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:57:28] [Rank 0] Total Loss: 4.9949
+[2025-09-05 16:57:28] [Rank 0] Total FTA (Unweighted): 0.8294
+[2025-09-05 16:57:28] [Rank 0] Total FTA (Weighted): 0.8294
+[2025-09-05 16:57:28] [Rank 0] Group 0 Loss: 4.7121
+[2025-09-05 16:57:28] [Rank 0] Group 1 Loss: 4.7361
+[2025-09-05 16:57:28] [Rank 0] Group 2 Loss: 4.4647
+[2025-09-05 16:57:28] [Rank 0] Group 3 Loss: 4.8651
+[2025-09-05 16:57:28] [Rank 0] Group 4 Loss: 4.8453
+[2025-09-05 16:57:28] [Rank 0] Group 5 Loss: 4.9207
+[2025-09-05 16:57:28] [Rank 0] Group 6 Loss: 4.8795
+[2025-09-05 16:57:28] [Rank 0] Group 7 Loss: 4.8952
+[2025-09-05 16:57:28] [Rank 0] Group 8 Loss: 5.0540
+[2025-09-05 16:57:28] [Rank 0] Group 9 Loss: 5.0124
+[2025-09-05 16:57:28] [Rank 0] Group 10 Loss: 5.1324
+[2025-09-05 16:57:28] [Rank 0] Group 11 Loss: 5.1761
+[2025-09-05 16:57:28] [Rank 0] Group 12 Loss: 5.2044
+[2025-09-05 16:57:28] [Rank 0] Group 13 Loss: 5.3458
+[2025-09-05 16:57:28] [Rank 0] Group 14 Loss: 5.2854
+[2025-09-05 16:57:28] [Rank 0] Group 15 Loss: 5.3889
+[2025-09-05 16:57:28] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 16:57:28] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:57:28] [Rank 0] Group 10 FTA: 0.9700
+[2025-09-05 16:57:28] [Rank 0] Group 11 FTA: 0.8900
+[2025-09-05 16:57:28] [Rank 0] Group 12 FTA: 0.8000
+[2025-09-05 16:57:28] [Rank 0] Group 13 FTA: 0.3500
+[2025-09-05 16:57:28] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 16:57:28] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 16:57:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:57:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:57:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:57:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:57:30] [Rank 0] step:7001/10000 train_time:266326ms step_avg:38.04ms
+[2025-09-05 16:57:31] [Rank 0] step:7021/10000 train_time:266771ms step_avg:38.00ms
+[2025-09-05 16:57:31] [Rank 0] step:7041/10000 train_time:267427ms step_avg:37.98ms
+[2025-09-05 16:57:32] [Rank 0] step:7061/10000 train_time:268084ms step_avg:37.97ms
+[2025-09-05 16:57:32] [Rank 0] step:7081/10000 train_time:268741ms step_avg:37.95ms
+[2025-09-05 16:57:33] [Rank 0] step:7101/10000 train_time:269398ms step_avg:37.94ms
+[2025-09-05 16:57:34] [Rank 0] step:7121/10000 train_time:270054ms step_avg:37.92ms
+[2025-09-05 16:57:34] [Rank 0] step:7141/10000 train_time:270710ms step_avg:37.91ms
+[2025-09-05 16:57:35] [Rank 0] step:7161/10000 train_time:271368ms step_avg:37.90ms
+[2025-09-05 16:57:36] [Rank 0] step:7181/10000 train_time:272025ms step_avg:37.88ms
+[2025-09-05 16:57:36] [Rank 0] step:7201/10000 train_time:272684ms step_avg:37.87ms
+[2025-09-05 16:57:37] [Rank 0] step:7221/10000 train_time:273340ms step_avg:37.85ms
+[2025-09-05 16:57:38] [Rank 0] step:7241/10000 train_time:273997ms step_avg:37.84ms
+[2025-09-05 16:57:38] [Rank 0] step:7261/10000 train_time:274654ms step_avg:37.83ms
+[2025-09-05 16:57:39] [Rank 0] step:7281/10000 train_time:275312ms step_avg:37.81ms
+[2025-09-05 16:57:40] [Rank 0] step:7301/10000 train_time:276133ms step_avg:37.82ms
+[2025-09-05 16:57:41] [Rank 0] step:7321/10000 train_time:276789ms step_avg:37.81ms
+[2025-09-05 16:57:41] [Rank 0] step:7341/10000 train_time:277446ms step_avg:37.79ms
+[2025-09-05 16:57:42] [Rank 0] step:7361/10000 train_time:278103ms step_avg:37.78ms
+[2025-09-05 16:57:43] [Rank 0] step:7381/10000 train_time:278967ms step_avg:37.80ms
+[2025-09-05 16:57:43] [Rank 0] step:7401/10000 train_time:279624ms step_avg:37.78ms
+[2025-09-05 16:57:44] [Rank 0] step:7421/10000 train_time:280281ms step_avg:37.77ms
+[2025-09-05 16:57:45] [Rank 0] step:7441/10000 train_time:280938ms step_avg:37.76ms
+[2025-09-05 16:57:45] [Rank 0] step:7461/10000 train_time:281595ms step_avg:37.74ms
+[2025-09-05 16:57:46] [Rank 0] step:7481/10000 train_time:282253ms step_avg:37.73ms
+[2025-09-05 16:57:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:57:47] [Rank 0] PRINT: step:7500/10000 train_loss:0.6964 val_loss:0.6873 train_time:283142ms step_avg:37.75ms
+[2025-09-05 16:57:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:57:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:59:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:59:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:59:07] [Rank 0] Total Loss: 5.0479
+[2025-09-05 16:59:07] [Rank 0] Total FTA (Unweighted): 0.8463
+[2025-09-05 16:59:07] [Rank 0] Total FTA (Weighted): 0.8462
+[2025-09-05 16:59:07] [Rank 0] Group 0 Loss: 4.8797
+[2025-09-05 16:59:07] [Rank 0] Group 1 Loss: 4.6322
+[2025-09-05 16:59:07] [Rank 0] Group 2 Loss: 4.5880
+[2025-09-05 16:59:07] [Rank 0] Group 3 Loss: 4.9482
+[2025-09-05 16:59:07] [Rank 0] Group 4 Loss: 4.8521
+[2025-09-05 16:59:07] [Rank 0] Group 5 Loss: 4.9648
+[2025-09-05 16:59:07] [Rank 0] Group 6 Loss: 4.9222
+[2025-09-05 16:59:07] [Rank 0] Group 7 Loss: 4.9492
+[2025-09-05 16:59:07] [Rank 0] Group 8 Loss: 5.1036
+[2025-09-05 16:59:07] [Rank 0] Group 9 Loss: 5.0194
+[2025-09-05 16:59:07] [Rank 0] Group 10 Loss: 5.2046
+[2025-09-05 16:59:07] [Rank 0] Group 11 Loss: 5.2392
+[2025-09-05 16:59:07] [Rank 0] Group 12 Loss: 5.2346
+[2025-09-05 16:59:07] [Rank 0] Group 13 Loss: 5.3865
+[2025-09-05 16:59:07] [Rank 0] Group 14 Loss: 5.3640
+[2025-09-05 16:59:07] [Rank 0] Group 15 Loss: 5.4785
+[2025-09-05 16:59:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:59:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:59:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:59:07] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:59:07] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:59:07] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:59:07] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:59:07] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:59:07] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:59:08] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:59:08] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 16:59:08] [Rank 0] Group 11 FTA: 0.9200
+[2025-09-05 16:59:08] [Rank 0] Group 12 FTA: 0.9000
+[2025-09-05 16:59:08] [Rank 0] Group 13 FTA: 0.4500
+[2025-09-05 16:59:08] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 16:59:08] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:59:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 16:59:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 16:59:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 16:59:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 16:59:09] [Rank 0] step:7501/10000 train_time:283150ms step_avg:37.75ms
+[2025-09-05 16:59:10] [Rank 0] step:7521/10000 train_time:283595ms step_avg:37.71ms
+[2025-09-05 16:59:10] [Rank 0] step:7541/10000 train_time:284250ms step_avg:37.69ms
+[2025-09-05 16:59:11] [Rank 0] step:7561/10000 train_time:284907ms step_avg:37.68ms
+[2025-09-05 16:59:12] [Rank 0] step:7581/10000 train_time:285563ms step_avg:37.67ms
+[2025-09-05 16:59:12] [Rank 0] step:7601/10000 train_time:286219ms step_avg:37.66ms
+[2025-09-05 16:59:13] [Rank 0] step:7621/10000 train_time:286875ms step_avg:37.64ms
+[2025-09-05 16:59:14] [Rank 0] step:7641/10000 train_time:287765ms step_avg:37.66ms
+[2025-09-05 16:59:15] [Rank 0] step:7661/10000 train_time:288658ms step_avg:37.68ms
+[2025-09-05 16:59:15] [Rank 0] step:7681/10000 train_time:289314ms step_avg:37.67ms
+[2025-09-05 16:59:16] [Rank 0] step:7701/10000 train_time:289970ms step_avg:37.65ms
+[2025-09-05 16:59:17] [Rank 0] step:7721/10000 train_time:290626ms step_avg:37.64ms
+[2025-09-05 16:59:17] [Rank 0] step:7741/10000 train_time:291281ms step_avg:37.63ms
+[2025-09-05 16:59:18] [Rank 0] step:7761/10000 train_time:291937ms step_avg:37.62ms
+[2025-09-05 16:59:19] [Rank 0] step:7781/10000 train_time:292593ms step_avg:37.60ms
+[2025-09-05 16:59:19] [Rank 0] step:7801/10000 train_time:293249ms step_avg:37.59ms
+[2025-09-05 16:59:20] [Rank 0] step:7821/10000 train_time:293905ms step_avg:37.58ms
+[2025-09-05 16:59:21] [Rank 0] step:7841/10000 train_time:294561ms step_avg:37.57ms
+[2025-09-05 16:59:21] [Rank 0] step:7861/10000 train_time:295217ms step_avg:37.55ms
+[2025-09-05 16:59:22] [Rank 0] step:7881/10000 train_time:295873ms step_avg:37.54ms
+[2025-09-05 16:59:23] [Rank 0] step:7901/10000 train_time:296529ms step_avg:37.53ms
+[2025-09-05 16:59:23] [Rank 0] step:7921/10000 train_time:297184ms step_avg:37.52ms
+[2025-09-05 16:59:24] [Rank 0] step:7941/10000 train_time:297840ms step_avg:37.51ms
+[2025-09-05 16:59:24] [Rank 0] step:7961/10000 train_time:298496ms step_avg:37.49ms
+[2025-09-05 16:59:25] [Rank 0] step:7981/10000 train_time:299152ms step_avg:37.48ms
+[2025-09-05 16:59:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:59:26] [Rank 0] PRINT: step:8000/10000 train_loss:0.6900 val_loss:0.6807 train_time:300042ms step_avg:37.51ms
+[2025-09-05 16:59:26] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:59:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:00:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:00:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:00:47] [Rank 0] Total Loss: 5.0550
+[2025-09-05 17:00:47] [Rank 0] Total FTA (Unweighted): 0.8556
+[2025-09-05 17:00:47] [Rank 0] Total FTA (Weighted): 0.8556
+[2025-09-05 17:00:47] [Rank 0] Group 0 Loss: 4.9673
+[2025-09-05 17:00:47] [Rank 0] Group 1 Loss: 4.7557
+[2025-09-05 17:00:47] [Rank 0] Group 2 Loss: 4.5279
+[2025-09-05 17:00:47] [Rank 0] Group 3 Loss: 4.9126
+[2025-09-05 17:00:47] [Rank 0] Group 4 Loss: 4.8163
+[2025-09-05 17:00:47] [Rank 0] Group 5 Loss: 4.9948
+[2025-09-05 17:00:47] [Rank 0] Group 6 Loss: 4.9508
+[2025-09-05 17:00:47] [Rank 0] Group 7 Loss: 4.9815
+[2025-09-05 17:00:47] [Rank 0] Group 8 Loss: 5.1544
+[2025-09-05 17:00:47] [Rank 0] Group 9 Loss: 5.0703
+[2025-09-05 17:00:47] [Rank 0] Group 10 Loss: 5.2314
+[2025-09-05 17:00:47] [Rank 0] Group 11 Loss: 5.2156
+[2025-09-05 17:00:47] [Rank 0] Group 12 Loss: 5.2122
+[2025-09-05 17:00:47] [Rank 0] Group 13 Loss: 5.3910
+[2025-09-05 17:00:47] [Rank 0] Group 14 Loss: 5.3005
+[2025-09-05 17:00:47] [Rank 0] Group 15 Loss: 5.3974
+[2025-09-05 17:00:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 17:00:47] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 17:00:47] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 17:00:47] [Rank 0] Group 12 FTA: 0.9300
+[2025-09-05 17:00:47] [Rank 0] Group 13 FTA: 0.4800
+[2025-09-05 17:00:47] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 17:00:47] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 17:00:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 17:00:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 17:00:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 17:00:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 17:00:49] [Rank 0] step:8001/10000 train_time:300050ms step_avg:37.50ms
+[2025-09-05 17:00:49] [Rank 0] step:8021/10000 train_time:300689ms step_avg:37.49ms
+[2025-09-05 17:00:50] [Rank 0] step:8041/10000 train_time:301345ms step_avg:37.48ms
+[2025-09-05 17:00:51] [Rank 0] step:8061/10000 train_time:302002ms step_avg:37.46ms
+[2025-09-05 17:00:51] [Rank 0] step:8081/10000 train_time:302658ms step_avg:37.45ms
+[2025-09-05 17:00:52] [Rank 0] step:8101/10000 train_time:303488ms step_avg:37.46ms
+[2025-09-05 17:00:53] [Rank 0] step:8121/10000 train_time:304144ms step_avg:37.45ms
+[2025-09-05 17:00:54] [Rank 0] step:8141/10000 train_time:304801ms step_avg:37.44ms
+[2025-09-05 17:00:54] [Rank 0] step:8161/10000 train_time:305457ms step_avg:37.43ms
+[2025-09-05 17:00:55] [Rank 0] step:8181/10000 train_time:306113ms step_avg:37.42ms
+[2025-09-05 17:00:56] [Rank 0] step:8201/10000 train_time:306769ms step_avg:37.41ms
+[2025-09-05 17:00:56] [Rank 0] step:8221/10000 train_time:307426ms step_avg:37.40ms
+[2025-09-05 17:00:57] [Rank 0] step:8241/10000 train_time:308082ms step_avg:37.38ms
+[2025-09-05 17:00:57] [Rank 0] step:8261/10000 train_time:308738ms step_avg:37.37ms
+[2025-09-05 17:00:58] [Rank 0] step:8281/10000 train_time:309395ms step_avg:37.36ms
+[2025-09-05 17:00:59] [Rank 0] step:8301/10000 train_time:310051ms step_avg:37.35ms
+[2025-09-05 17:00:59] [Rank 0] step:8321/10000 train_time:310707ms step_avg:37.34ms
+[2025-09-05 17:01:00] [Rank 0] step:8341/10000 train_time:311364ms step_avg:37.33ms
+[2025-09-05 17:01:01] [Rank 0] step:8361/10000 train_time:312021ms step_avg:37.32ms
+[2025-09-05 17:01:01] [Rank 0] step:8381/10000 train_time:312678ms step_avg:37.31ms
+[2025-09-05 17:01:02] [Rank 0] step:8401/10000 train_time:313335ms step_avg:37.30ms
+[2025-09-05 17:01:03] [Rank 0] step:8421/10000 train_time:313992ms step_avg:37.29ms
+[2025-09-05 17:01:03] [Rank 0] step:8441/10000 train_time:314651ms step_avg:37.28ms
+[2025-09-05 17:01:04] [Rank 0] step:8461/10000 train_time:315307ms step_avg:37.27ms
+[2025-09-05 17:01:05] [Rank 0] step:8481/10000 train_time:315965ms step_avg:37.26ms
+[2025-09-05 17:01:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:01:06] [Rank 0] PRINT: step:8500/10000 train_loss:0.6838 val_loss:0.6749 train_time:316856ms step_avg:37.28ms
+[2025-09-05 17:01:06] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:01:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:02:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:02:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:02:26] [Rank 0] Total Loss: 5.1116
+[2025-09-05 17:02:26] [Rank 0] Total FTA (Unweighted): 0.8644
+[2025-09-05 17:02:26] [Rank 0] Total FTA (Weighted): 0.8644
+[2025-09-05 17:02:26] [Rank 0] Group 0 Loss: 5.0701
+[2025-09-05 17:02:26] [Rank 0] Group 1 Loss: 4.6770
+[2025-09-05 17:02:26] [Rank 0] Group 2 Loss: 4.6253
+[2025-09-05 17:02:26] [Rank 0] Group 3 Loss: 4.9387
+[2025-09-05 17:02:26] [Rank 0] Group 4 Loss: 4.9156
+[2025-09-05 17:02:26] [Rank 0] Group 5 Loss: 5.0387
+[2025-09-05 17:02:26] [Rank 0] Group 6 Loss: 5.0101
+[2025-09-05 17:02:26] [Rank 0] Group 7 Loss: 5.0335
+[2025-09-05 17:02:27] [Rank 0] Group 8 Loss: 5.2200
+[2025-09-05 17:02:27] [Rank 0] Group 9 Loss: 5.1102
+[2025-09-05 17:02:27] [Rank 0] Group 10 Loss: 5.2547
+[2025-09-05 17:02:27] [Rank 0] Group 11 Loss: 5.3164
+[2025-09-05 17:02:27] [Rank 0] Group 12 Loss: 5.3018
+[2025-09-05 17:02:27] [Rank 0] Group 13 Loss: 5.4316
+[2025-09-05 17:02:27] [Rank 0] Group 14 Loss: 5.3591
+[2025-09-05 17:02:27] [Rank 0] Group 15 Loss: 5.4824
+[2025-09-05 17:02:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 17:02:27] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 17:02:27] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 17:02:27] [Rank 0] Group 12 FTA: 0.9400
+[2025-09-05 17:02:27] [Rank 0] Group 13 FTA: 0.5200
+[2025-09-05 17:02:27] [Rank 0] Group 14 FTA: 0.2600
+[2025-09-05 17:02:27] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 17:02:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 17:02:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 17:02:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 17:02:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 17:02:28] [Rank 0] step:8501/10000 train_time:316864ms step_avg:37.27ms
+[2025-09-05 17:02:29] [Rank 0] step:8521/10000 train_time:317297ms step_avg:37.24ms
+[2025-09-05 17:02:29] [Rank 0] step:8541/10000 train_time:317953ms step_avg:37.23ms
+[2025-09-05 17:02:30] [Rank 0] step:8561/10000 train_time:318609ms step_avg:37.22ms
+[2025-09-05 17:02:31] [Rank 0] step:8581/10000 train_time:319265ms step_avg:37.21ms
+[2025-09-05 17:02:31] [Rank 0] step:8601/10000 train_time:319921ms step_avg:37.20ms
+[2025-09-05 17:02:32] [Rank 0] step:8621/10000 train_time:320576ms step_avg:37.19ms
+[2025-09-05 17:02:33] [Rank 0] step:8641/10000 train_time:321233ms step_avg:37.18ms
+[2025-09-05 17:02:33] [Rank 0] step:8661/10000 train_time:321887ms step_avg:37.17ms
+[2025-09-05 17:02:34] [Rank 0] step:8681/10000 train_time:322543ms step_avg:37.16ms
+[2025-09-05 17:02:35] [Rank 0] step:8701/10000 train_time:323199ms step_avg:37.15ms
+[2025-09-05 17:02:35] [Rank 0] step:8721/10000 train_time:323855ms step_avg:37.14ms
+[2025-09-05 17:02:36] [Rank 0] step:8741/10000 train_time:324511ms step_avg:37.13ms
+[2025-09-05 17:02:37] [Rank 0] step:8761/10000 train_time:325167ms step_avg:37.12ms
+[2025-09-05 17:02:37] [Rank 0] step:8781/10000 train_time:325823ms step_avg:37.11ms
+[2025-09-05 17:02:38] [Rank 0] step:8801/10000 train_time:326479ms step_avg:37.10ms
+[2025-09-05 17:02:39] [Rank 0] step:8821/10000 train_time:327135ms step_avg:37.09ms
+[2025-09-05 17:02:39] [Rank 0] step:8841/10000 train_time:327893ms step_avg:37.09ms
+[2025-09-05 17:02:40] [Rank 0] step:8861/10000 train_time:328550ms step_avg:37.08ms
+[2025-09-05 17:02:41] [Rank 0] step:8881/10000 train_time:329206ms step_avg:37.07ms
+[2025-09-05 17:02:41] [Rank 0] step:8901/10000 train_time:329861ms step_avg:37.06ms
+[2025-09-05 17:02:42] [Rank 0] step:8921/10000 train_time:330517ms step_avg:37.05ms
+[2025-09-05 17:02:43] [Rank 0] step:8941/10000 train_time:331172ms step_avg:37.04ms
+[2025-09-05 17:02:43] [Rank 0] step:8961/10000 train_time:331828ms step_avg:37.03ms
+[2025-09-05 17:02:44] [Rank 0] step:8981/10000 train_time:332484ms step_avg:37.02ms
+[2025-09-05 17:02:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:02:45] [Rank 0] PRINT: step:9000/10000 train_loss:0.6781 val_loss:0.6695 train_time:333374ms step_avg:37.04ms
+[2025-09-05 17:02:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:02:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:04:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:04:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:04:06] [Rank 0] Total Loss: 5.0438
+[2025-09-05 17:04:07] [Rank 0] Total FTA (Unweighted): 0.8787
+[2025-09-05 17:04:07] [Rank 0] Total FTA (Weighted): 0.8788
+[2025-09-05 17:04:07] [Rank 0] Group 0 Loss: 4.9323
+[2025-09-05 17:04:07] [Rank 0] Group 1 Loss: 4.5890
+[2025-09-05 17:04:07] [Rank 0] Group 2 Loss: 4.5634
+[2025-09-05 17:04:07] [Rank 0] Group 3 Loss: 4.8286
+[2025-09-05 17:04:07] [Rank 0] Group 4 Loss: 4.8373
+[2025-09-05 17:04:07] [Rank 0] Group 5 Loss: 4.9898
+[2025-09-05 17:04:07] [Rank 0] Group 6 Loss: 4.9464
+[2025-09-05 17:04:07] [Rank 0] Group 7 Loss: 4.9975
+[2025-09-05 17:04:07] [Rank 0] Group 8 Loss: 5.1544
+[2025-09-05 17:04:07] [Rank 0] Group 9 Loss: 5.0599
+[2025-09-05 17:04:07] [Rank 0] Group 10 Loss: 5.2102
+[2025-09-05 17:04:07] [Rank 0] Group 11 Loss: 5.2956
+[2025-09-05 17:04:07] [Rank 0] Group 12 Loss: 5.2563
+[2025-09-05 17:04:07] [Rank 0] Group 13 Loss: 5.3545
+[2025-09-05 17:04:07] [Rank 0] Group 14 Loss: 5.2829
+[2025-09-05 17:04:07] [Rank 0] Group 15 Loss: 5.4026
+[2025-09-05 17:04:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 17:04:07] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 17:04:07] [Rank 0] Group 11 FTA: 0.9500
+[2025-09-05 17:04:07] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-05 17:04:07] [Rank 0] Group 13 FTA: 0.6300
+[2025-09-05 17:04:07] [Rank 0] Group 14 FTA: 0.3600
+[2025-09-05 17:04:07] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 17:04:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 17:04:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 17:04:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 17:04:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 17:04:08] [Rank 0] step:9001/10000 train_time:333382ms step_avg:37.04ms
+[2025-09-05 17:04:09] [Rank 0] step:9021/10000 train_time:333830ms step_avg:37.01ms
+[2025-09-05 17:04:09] [Rank 0] step:9041/10000 train_time:334486ms step_avg:37.00ms
+[2025-09-05 17:04:10] [Rank 0] step:9061/10000 train_time:335142ms step_avg:36.99ms
+[2025-09-05 17:04:11] [Rank 0] step:9081/10000 train_time:335798ms step_avg:36.98ms
+[2025-09-05 17:04:11] [Rank 0] step:9101/10000 train_time:336454ms step_avg:36.97ms
+[2025-09-05 17:04:12] [Rank 0] step:9121/10000 train_time:337110ms step_avg:36.96ms
+[2025-09-05 17:04:13] [Rank 0] step:9141/10000 train_time:337766ms step_avg:36.95ms
+[2025-09-05 17:04:13] [Rank 0] step:9161/10000 train_time:338424ms step_avg:36.94ms
+[2025-09-05 17:04:14] [Rank 0] step:9181/10000 train_time:339080ms step_avg:36.93ms
+[2025-09-05 17:04:15] [Rank 0] step:9201/10000 train_time:339737ms step_avg:36.92ms
+[2025-09-05 17:04:15] [Rank 0] step:9221/10000 train_time:340392ms step_avg:36.91ms
+[2025-09-05 17:04:16] [Rank 0] step:9241/10000 train_time:341049ms step_avg:36.91ms
+[2025-09-05 17:04:17] [Rank 0] step:9261/10000 train_time:341705ms step_avg:36.90ms
+[2025-09-05 17:04:17] [Rank 0] step:9281/10000 train_time:342362ms step_avg:36.89ms
+[2025-09-05 17:04:18] [Rank 0] step:9301/10000 train_time:343018ms step_avg:36.88ms
+[2025-09-05 17:04:19] [Rank 0] step:9321/10000 train_time:343674ms step_avg:36.87ms
+[2025-09-05 17:04:19] [Rank 0] step:9341/10000 train_time:344331ms step_avg:36.86ms
+[2025-09-05 17:04:20] [Rank 0] step:9361/10000 train_time:344988ms step_avg:36.85ms
+[2025-09-05 17:04:20] [Rank 0] step:9381/10000 train_time:345644ms step_avg:36.85ms
+[2025-09-05 17:04:21] [Rank 0] step:9401/10000 train_time:346303ms step_avg:36.84ms
+[2025-09-05 17:04:22] [Rank 0] step:9421/10000 train_time:346958ms step_avg:36.83ms
+[2025-09-05 17:04:22] [Rank 0] step:9441/10000 train_time:347615ms step_avg:36.82ms
+[2025-09-05 17:04:23] [Rank 0] step:9461/10000 train_time:348271ms step_avg:36.81ms
+[2025-09-05 17:04:24] [Rank 0] step:9481/10000 train_time:348928ms step_avg:36.80ms
+[2025-09-05 17:04:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:04:25] [Rank 0] PRINT: step:9500/10000 train_loss:0.6728 val_loss:0.6653 train_time:349819ms step_avg:36.82ms
+[2025-09-05 17:04:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:04:25] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:05:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:05:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:05:45] [Rank 0] Total Loss: 5.1183
+[2025-09-05 17:05:45] [Rank 0] Total FTA (Unweighted): 0.8794
+[2025-09-05 17:05:45] [Rank 0] Total FTA (Weighted): 0.8794
+[2025-09-05 17:05:45] [Rank 0] Group 0 Loss: 5.0153
+[2025-09-05 17:05:45] [Rank 0] Group 1 Loss: 4.7477
+[2025-09-05 17:05:45] [Rank 0] Group 2 Loss: 4.5673
+[2025-09-05 17:05:45] [Rank 0] Group 3 Loss: 4.9154
+[2025-09-05 17:05:45] [Rank 0] Group 4 Loss: 4.9062
+[2025-09-05 17:05:45] [Rank 0] Group 5 Loss: 5.0840
+[2025-09-05 17:05:45] [Rank 0] Group 6 Loss: 5.0179
+[2025-09-05 17:05:45] [Rank 0] Group 7 Loss: 5.0420
+[2025-09-05 17:05:45] [Rank 0] Group 8 Loss: 5.2286
+[2025-09-05 17:05:45] [Rank 0] Group 9 Loss: 5.1158
+[2025-09-05 17:05:45] [Rank 0] Group 10 Loss: 5.3099
+[2025-09-05 17:05:45] [Rank 0] Group 11 Loss: 5.3612
+[2025-09-05 17:05:45] [Rank 0] Group 12 Loss: 5.3164
+[2025-09-05 17:05:45] [Rank 0] Group 13 Loss: 5.4314
+[2025-09-05 17:05:45] [Rank 0] Group 14 Loss: 5.3568
+[2025-09-05 17:05:45] [Rank 0] Group 15 Loss: 5.4764
+[2025-09-05 17:05:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 17:05:45] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 17:05:45] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 17:05:45] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-05 17:05:45] [Rank 0] Group 13 FTA: 0.6500
+[2025-09-05 17:05:45] [Rank 0] Group 14 FTA: 0.3000
+[2025-09-05 17:05:45] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-05 17:05:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 17:05:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 17:05:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 17:05:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 17:05:47] [Rank 0] step:9501/10000 train_time:349827ms step_avg:36.82ms
+[2025-09-05 17:05:47] [Rank 0] step:9521/10000 train_time:350279ms step_avg:36.79ms
+[2025-09-05 17:05:48] [Rank 0] step:9541/10000 train_time:350935ms step_avg:36.78ms
+[2025-09-05 17:05:49] [Rank 0] step:9561/10000 train_time:351590ms step_avg:36.77ms
+[2025-09-05 17:05:49] [Rank 0] step:9581/10000 train_time:352246ms step_avg:36.77ms
+[2025-09-05 17:05:50] [Rank 0] step:9601/10000 train_time:352901ms step_avg:36.76ms
+[2025-09-05 17:05:51] [Rank 0] step:9621/10000 train_time:353557ms step_avg:36.75ms
+[2025-09-05 17:05:51] [Rank 0] step:9641/10000 train_time:354213ms step_avg:36.74ms
+[2025-09-05 17:05:52] [Rank 0] step:9661/10000 train_time:355150ms step_avg:36.76ms
+[2025-09-05 17:05:53] [Rank 0] step:9681/10000 train_time:355806ms step_avg:36.75ms
+[2025-09-05 17:05:54] [Rank 0] step:9701/10000 train_time:356461ms step_avg:36.74ms
+[2025-09-05 17:05:54] [Rank 0] step:9721/10000 train_time:357117ms step_avg:36.74ms
+[2025-09-05 17:05:55] [Rank 0] step:9741/10000 train_time:357773ms step_avg:36.73ms
+[2025-09-05 17:05:56] [Rank 0] step:9761/10000 train_time:358429ms step_avg:36.72ms
+[2025-09-05 17:05:56] [Rank 0] step:9781/10000 train_time:359085ms step_avg:36.71ms
+[2025-09-05 17:05:57] [Rank 0] step:9801/10000 train_time:359743ms step_avg:36.70ms
+[2025-09-05 17:05:57] [Rank 0] step:9821/10000 train_time:360398ms step_avg:36.70ms
+[2025-09-05 17:05:58] [Rank 0] step:9841/10000 train_time:361054ms step_avg:36.69ms
+[2025-09-05 17:05:59] [Rank 0] step:9861/10000 train_time:361710ms step_avg:36.68ms
+[2025-09-05 17:05:59] [Rank 0] step:9881/10000 train_time:362366ms step_avg:36.67ms
+[2025-09-05 17:06:00] [Rank 0] step:9901/10000 train_time:363021ms step_avg:36.67ms
+[2025-09-05 17:06:01] [Rank 0] step:9921/10000 train_time:363676ms step_avg:36.66ms
+[2025-09-05 17:06:01] [Rank 0] step:9941/10000 train_time:364332ms step_avg:36.65ms
+[2025-09-05 17:06:02] [Rank 0] step:9961/10000 train_time:364988ms step_avg:36.64ms
+[2025-09-05 17:06:03] [Rank 0] step:9981/10000 train_time:365745ms step_avg:36.64ms
+[2025-09-05 17:06:03] [Rank 0] step:10000/10000 train_time:366370ms step_avg:36.64ms
+[2025-09-05 17:06:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:06:04] [Rank 0] PRINT: step:10000/10000 train_loss:0.6683 val_loss:0.6611 train_time:366641ms step_avg:36.66ms
+[2025-09-05 17:06:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:06:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:07:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:07:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:07:25] [Rank 0] Total Loss: 5.1622
+[2025-09-05 17:07:25] [Rank 0] Total FTA (Unweighted): 0.8894
+[2025-09-05 17:07:25] [Rank 0] Total FTA (Weighted): 0.8894
+[2025-09-05 17:07:25] [Rank 0] Group 0 Loss: 5.0439
+[2025-09-05 17:07:25] [Rank 0] Group 1 Loss: 4.7761
+[2025-09-05 17:07:25] [Rank 0] Group 2 Loss: 4.6480
+[2025-09-05 17:07:25] [Rank 0] Group 3 Loss: 5.0069
+[2025-09-05 17:07:25] [Rank 0] Group 4 Loss: 4.9516
+[2025-09-05 17:07:25] [Rank 0] Group 5 Loss: 5.0995
+[2025-09-05 17:07:25] [Rank 0] Group 6 Loss: 5.0509
+[2025-09-05 17:07:25] [Rank 0] Group 7 Loss: 5.1026
+[2025-09-05 17:07:25] [Rank 0] Group 8 Loss: 5.2898
+[2025-09-05 17:07:25] [Rank 0] Group 9 Loss: 5.1641
+[2025-09-05 17:07:25] [Rank 0] Group 10 Loss: 5.3459
+[2025-09-05 17:07:25] [Rank 0] Group 11 Loss: 5.4023
+[2025-09-05 17:07:25] [Rank 0] Group 12 Loss: 5.3493
+[2025-09-05 17:07:25] [Rank 0] Group 13 Loss: 5.4772
+[2025-09-05 17:07:25] [Rank 0] Group 14 Loss: 5.3906
+[2025-09-05 17:07:25] [Rank 0] Group 15 Loss: 5.4962
+[2025-09-05 17:07:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 17:07:25] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 17:07:25] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-05 17:07:25] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-05 17:07:25] [Rank 0] Group 13 FTA: 0.7100
+[2025-09-05 17:07:25] [Rank 0] Group 14 FTA: 0.3900
+[2025-09-05 17:07:25] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-05 17:07:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-09-05 17:07:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-09-05 17:07:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_loss_curve.png
+[2025-09-05 17:07:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_42/total_acc_curve.png
+[2025-09-05 17:07:26] [Rank 0] step:10001/10000 train_time:366648ms step_avg:36.66ms
+[2025-09-05 17:07:26] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 17:07:26 2025 ---
+[2025-09-05 17:07:26] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..468779a6d95ad8e213edef45405852ceb5d066bb
--- /dev/null
+++ 
b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_adam_gated/lr_search_long", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e9e6a63c-5246-483a-b4d0-419fcbd966fd", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 
121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 
1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 
850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 
756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..b87a108173916083a958b3169d5dd00c02af8041 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7603fcd8586f90cd329b1d860e575c0f6ccb1125dc2627e83404aafe5e382aea +size 435259 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..b158818a13c26e01289442f26a376a38e35b2345 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c061d0312a4911136fbb6630124ed24122cc936b3768e85b8204a1c58149425 +size 481243 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..afdc3a5b6a83229c00d66fc6b8c3fb9086f2824c --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34df7b0c488f47e454c4df29fec6779bd646f820c210face0fe52be32d04e48e +size 104532 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..c583008aa7b83d3c28cbc26a11e9d7041ce60853 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30fdbfd3524dd360b6f4a201af2226deadc1e6b9e74e9e8246d36ec4b4590efa +size 112046 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/training_log_e9e6a63c-5246-483a-b4d0-419fcbd966fd.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/training_log_e9e6a63c-5246-483a-b4d0-419fcbd966fd.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b686e724c7166453a6bb53e37a3541232de6a4c --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/training_log_e9e6a63c-5246-483a-b4d0-419fcbd966fd.txt @@ -0,0 +1,5614 @@ +[2025-09-05 19:30:11] 
[Rank 0] PRINT: --- Script Start: Fri Sep 5 19:30:11 2025 ---
+[2025-09-05 19:30:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 19:30:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 19:30:11] [Rank 0] PRINT: Using fixed seed: 43
+[2025-09-05 19:30:11] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43
+[2025-09-05 19:30:11] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
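+# Editor's sketch (illustrative, not part of the original run): a minimal
+# writer for the shard layout _load_data_shard expects -- a 256-int32 header
+# carrying the magic number, a version field, and the token count, followed
+# by the raw uint16 token ids. The function name is hypothetical.
+def _write_data_shard_sketch(file: Path, token_ids: np.ndarray):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520        # magic number checked by _load_data_shard
+    header[1] = 1               # format version
+    header[2] = len(token_ids)  # number of uint16 tokens that follow
+    with file.open("wb") as f:
+        f.write(header.tobytes())
+        f.write(token_ids.astype(np.uint16).tobytes())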
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so training can run for multiple epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir) # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message exactly once (the original duplicated this write
+        # block, which is why the early log lines above appeared twice)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
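+# Editor's aside -- a quick, illustrative sanity check of the distribution
+# built above (not part of the original run). For m = 3, group 0 holds one
+# head class with 2**3 = 8 samples; each later group doubles the class count
+# while halving the per-class sample count:
+_demo_counts, _demo_groups = generate_powerlaw_selection_counts(3)
+assert _demo_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+assert _demo_groups == [0, 1, 2, 2, 3, 3, 3, 3]
+del _demo_counts, _demo_groups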
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation pass, compute Per-Class Loss, Per-Class FTA (first-token accuracy), Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load the evaluation data (the stratified-sampling path below was superseded by the fixed eval set and is kept for reference)
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3.
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
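+    # Editor's summary (condensed from the branches above; this is the script's
+    # own routing for the qkvo parameterization, where embeds/head/scalars
+    # always go to Adam and everything not listed under Muon goes to Adam):
+    #   mode 0 : Muon = QKVO + MLP         mode 8 : Muon = VO + W2
+    #   mode 1 : Muon = QK                 mode 9 : plain SGD + momentum
+    #   mode 2 : Muon = VO                 mode 10: Muon = O + MLP
+    #   mode 3 : Muon = QKVO               mode 13: Muon = O + W2
+    #   mode 4 : Muon = MLP                mode 14: Muon = O
+    #   mode 5 : all-Adam (this run)       mode 15: Muon = V
+    #   mode 6 : Muon = W2                 mode 16: Muon = QKV
+    #   mode 7 : Muon = VO + MLP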
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
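+    # Editor's aside (hedged): the gated parameterization differs from qkvo in
+    # that each MLP contributes three matrices -- c_fc and c_up form the W_1
+    # group and c_proj forms W_2. The actual model lives in
+    # models.nano_GPT_gated, which this log does not include; a minimal
+    # SwiGLU-style sketch consistent with those three weights would be:
+    #
+    #     class GatedMLP(nn.Module):
+    #         def __init__(self, dim: int, hidden: int):
+    #             super().__init__()
+    #             self.c_fc = nn.Linear(dim, hidden, bias=False)   # gate branch (W_1)
+    #             self.c_up = nn.Linear(dim, hidden, bias=False)   # value branch (W_1)
+    #             self.c_proj = nn.Linear(hidden, dim, bias=False) # W_2
+    #         def forward(self, x):
+    #             return self.c_proj(F.silu(self.c_fc(x)) * self.c_up(x))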
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
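+# Editor's note (hedged): for this particular run -- mode 5 with the gated
+# parameterization -- the branch above therefore builds no Muon instance at
+# all; a single fused Adam holds four groups (head, embeddings, scalars, and
+# every attention/MLP matrix), each at adam_lr = 0.001.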
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight]
+ + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2]
+ +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"]
+ +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+ + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1
+ + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x
+ + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size)
+ +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Compile the base 'model'; the compiled module is bound to 'model_compiled' on the next line + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True)
+ +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +)
+ +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward()
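+ # [Editorial note, not in the original script] Gradients here (and in the training loop below) are averaged by hand with dist.all_reduce(op=ReduceOp.AVG) instead of wrapping the model in DistributedDataParallel, so every rank takes identical optimizer steps; with WORLD_SIZE=1 these all_reduce calls are no-ops.
+ for param in 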
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state)
+ +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize()
+ +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations
+ + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+ + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + }
+ + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group
+ + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filter when building the fixed eval set: keep only lines whose text parses as "<question>? Answer: <answer>" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+ + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i)
+ + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed
+ + # QA_JSONL_PATH and M_FOR_POWERLAW were resolved from the CLI args above + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+ + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f)
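+ # [Editorial note] fixed_idx maps str(group_id) -> a list of 0-based line indices into the QA jsonl, at most per_group_k entries per group; persisting it as fixed_eval_indices.json keeps the evaluation set identical across restarts that reuse this run directory.
+ print0(f"PRINT: Built fixed eval set. 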
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 19:30:11] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1))
+ +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+ +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device)
+ +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0)
+ +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}")
+ +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training
+ +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed)
+ + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path)
+ + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}")
+ + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}")
+ +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}"
+ + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process
+ + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n")
+ + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs)
+ + + +# -----------------------------------------------------------------------------
+ +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups
+ + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval()
+ + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+ + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+ + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+ + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+ + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam
+ + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p))
+ + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None
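+ + # [Editorial note] ns_steps=5 above sets the number of Newton-Schulz iterations Muon runs to (approximately) orthogonalize each momentum update; the "whole" parameterization branch further below constructs Muon with its default nesterov/ns_steps settings instead.
+ + print0(f"PRINT: Optimizers configured. 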
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+ + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = []
+ + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight)
+ + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+ + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >= 2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+ + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr
+ + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+ + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
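+# (Editor's sketch, assuming the variables above; a quick invariant check one could
+#  drop in after the setup to verify that Adam and Muon partition the matrices with
+#  no overlap. Commented out, not part of the original run.)
+# adam_ids = {id(p) for g in optimizer1.param_groups for p in g["params"]}
+# muon_ids = {id(p) for p in flat_unique_muon_params} if optimizer2 is not None else set()
+# assert not (adam_ids & muon_ids), "a parameter is assigned to both Adam and Muon"
+# assert adam_ids | muon_ids >= {id(p) for p in model.parameters() if p.requires_grad}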
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
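+# (Editor's note, illustrative only: with num_iterations = 10000 as in this run
+#  and assuming cooldown_frac = 0.8, the multiplier returned above is constant
+#  at 1.0 for the first 2000 steps, then decays linearly toward 0.1:)
+# get_lr(1000)  -> 1.0    # x = 0.1, stable phase (x < 0.2)
+# get_lr(6000)  -> 0.55   # x = 0.6, w = 0.5 -> 0.5*1.0 + 0.5*0.1
+# get_lr(10000) -> 0.1    # x = 1.0, decayed to 10% of the initial LR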
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
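+# (Editor's note, illustrative only: the schedule above grows the attention window
+#  from 128 tokens toward 1728 over training, rounding up to a multiple of 128 and
+#  returning the count of 128-token blocks. Assuming num_iterations = 10000:)
+# step 0     -> window 128                      -> 1 block
+# step 5000  -> 1728*0.5 = 864, rounded to 896  -> 7 blocks
+# step 10000 -> 1728*1.0 = 1728, rounded to 1792 -> 14 blocks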
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    # PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter used when building the fixed eval set: the text must parse
+        # as "<question>?" followed by "Answer: <answer>"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, collecting only samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    # QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    # M_FOR_POWERLAW = 15
+    # NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
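+    # (Editor's note: the saved JSON maps group id -> sampled JSONL line indices,
+    #  e.g. {"0": [...], "1": [...], ...}. With 16 groups and PER_GROUP_K = 100
+    #  this yields the 1600 fixed eval samples reported in the log below.)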
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
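+        # (Editor's note: with val_tokens = 491520 and val_batch_size = 65536 the
+        #  floor division above gives 7 full validation steps; the remaining 32768
+        #  tokens are skipped each eval, which is what the recurring warning in the
+        #  log reports.)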
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                # num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 19:30:11] [Rank 0] PRINT: Constructing model...
+[2025-09-05 19:30:13] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 19:30:13] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 19:30:13] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 19:30:17] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 19:30:17] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 19:30:17] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 19:30:17] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 19:30:17] [Rank 0] PRINT: Model returns:
+[2025-09-05 19:30:17] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 19:30:17] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 19:30:17] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001).
+[2025-09-05 19:30:17] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 19:30:17] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 19:30:17] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 19:30:22] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 19:30:22] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 19:31:01] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 19:31:01] [Rank 0] PRINT: Starting training...
+[2025-09-05 19:31:08] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/fixed_eval_indices.json
+[2025-09-05 19:31:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:31:11] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 19:31:44] [Rank 0] step:21/10000 train_time:32189ms step_avg:1532.82ms
+[2025-09-05 19:31:44] [Rank 0] step:41/10000 train_time:32836ms step_avg:800.88ms
+[2025-09-05 19:31:45] [Rank 0] step:61/10000 train_time:33483ms step_avg:548.90ms
+[2025-09-05 19:31:46] [Rank 0] step:81/10000 train_time:34128ms step_avg:421.33ms
+[2025-09-05 19:31:46] [Rank 0] step:101/10000 train_time:34773ms step_avg:344.29ms
+[2025-09-05 19:31:47] [Rank 0] step:121/10000 train_time:35419ms step_avg:292.72ms
+[2025-09-05 19:31:47] [Rank 0] step:141/10000 train_time:36066ms step_avg:255.79ms
+[2025-09-05 19:31:48] [Rank 0] step:161/10000 train_time:36712ms step_avg:228.03ms
+[2025-09-05 19:31:49] [Rank 0] step:181/10000 train_time:37359ms step_avg:206.40ms
+[2025-09-05 19:31:49] [Rank 0] step:201/10000 train_time:38006ms step_avg:189.08ms
+[2025-09-05 19:31:50] [Rank 0] step:221/10000 train_time:38652ms step_avg:174.90ms
+[2025-09-05 19:31:51] [Rank 0] step:241/10000 train_time:39299ms step_avg:163.07ms
+[2025-09-05 19:31:51] [Rank 0] step:261/10000 train_time:39945ms step_avg:153.05ms
+[2025-09-05 19:31:52] [Rank 0] step:281/10000 train_time:40592ms step_avg:144.45ms
+[2025-09-05 19:31:53] [Rank 0] step:301/10000 train_time:41238ms step_avg:137.00ms
+[2025-09-05 19:31:53] [Rank 0] step:321/10000 train_time:41884ms step_avg:130.48ms
+[2025-09-05 19:31:54] [Rank 0] step:341/10000 train_time:42530ms step_avg:124.72ms
+[2025-09-05 19:31:55] [Rank 0] step:361/10000 train_time:43177ms step_avg:119.60ms
+[2025-09-05 19:31:55] [Rank 0] step:381/10000 train_time:43823ms step_avg:115.02ms
+[2025-09-05 19:31:56] [Rank 0] step:401/10000 train_time:44470ms step_avg:110.90ms
+[2025-09-05 19:31:57] [Rank 0] step:421/10000 train_time:45115ms step_avg:107.16ms
+[2025-09-05 19:31:57] [Rank 0] step:441/10000 train_time:45762ms step_avg:103.77ms
+[2025-09-05 19:31:58] [Rank 0] step:461/10000 train_time:46409ms step_avg:100.67ms
+[2025-09-05 19:31:58] [Rank 0] step:481/10000 train_time:47062ms step_avg:97.84ms
+[2025-09-05 19:31:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:32:00] [Rank 0] PRINT: step:500/10000 train_loss:5.1865 val_loss:2.3382 train_time:47939ms step_avg:95.88ms
+[2025-09-05 19:32:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:32:00] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:33:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:33:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:33:20] [Rank 0] Total Loss: 4.2107
+[2025-09-05 19:33:20] [Rank 0] Total FTA (Unweighted): 0.0931
+[2025-09-05 19:33:21] [Rank 0] Total FTA (Weighted): 0.0931
+[2025-09-05 19:33:21] [Rank 0] Group 0 Loss: 3.1152
+[2025-09-05 19:33:21] [Rank 0] Group 1 Loss: 3.0187
+[2025-09-05 19:33:21] [Rank 0] Group 2 Loss: 3.0861
+[2025-09-05 19:33:21] [Rank 0] Group 3 Loss: 3.5902
+[2025-09-05 19:33:21] [Rank 0] Group 4 Loss: 3.9423
+[2025-09-05 19:33:21] [Rank 0] Group 5 Loss: 4.1577
+[2025-09-05 19:33:21] [Rank 0] Group 6 Loss: 4.3113
+[2025-09-05 19:33:21] [Rank 0] Group 7 Loss: 4.3669
+[2025-09-05 19:33:21] [Rank 0] Group 8 Loss: 4.5664
+[2025-09-05 19:33:21] [Rank 0] Group 9 Loss: 4.6768
+[2025-09-05 19:33:21] [Rank 0] Group 10 Loss: 4.7564
+[2025-09-05 19:33:21] [Rank 0] Group 11 Loss: 4.7889
+[2025-09-05 19:33:21] [Rank 0] Group 12 Loss: 4.7322
+[2025-09-05 19:33:21] [Rank 0] Group 13 Loss: 4.7520
+[2025-09-05 19:33:21] [Rank 0] Group 14 Loss: 4.7968
+[2025-09-05 19:33:21] [Rank 0] Group 15 Loss: 4.7140
+[2025-09-05 19:33:21] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 19:33:21] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 19:33:21] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 19:33:21] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-05 19:33:21] [Rank 0] Group 4 FTA: 0.0400
+[2025-09-05 19:33:21] [Rank 0] Group 5 FTA: 0.1100
+[2025-09-05 19:33:21] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 19:33:21] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-05 19:33:21] [Rank 0] Group 8 FTA: 0.1200
+[2025-09-05 19:33:21] [Rank 0] Group 9 FTA: 0.0500
+[2025-09-05 19:33:21] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-05 19:33:21] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 19:33:21] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 19:33:21] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 19:33:21] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 19:33:21] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 19:33:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:33:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:33:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:33:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:33:22] [Rank 0] step:501/10000 train_time:47946ms step_avg:95.70ms
+[2025-09-05 19:33:23] [Rank 0] step:521/10000 train_time:48371ms step_avg:92.84ms
+[2025-09-05 19:33:23] [Rank 0] step:541/10000 train_time:49018ms step_avg:90.61ms
+[2025-09-05 19:33:24] [Rank 0] step:561/10000 train_time:49665ms step_avg:88.53ms
+[2025-09-05 19:33:25] [Rank 0] step:581/10000 train_time:50312ms step_avg:86.60ms
+[2025-09-05 19:33:25] [Rank 0] step:601/10000 train_time:50958ms step_avg:84.79ms
+[2025-09-05 19:33:26] [Rank 0] step:621/10000 train_time:51604ms step_avg:83.10ms
+[2025-09-05 19:33:27] [Rank 0] step:641/10000 train_time:52252ms step_avg:81.52ms
+[2025-09-05 19:33:27] [Rank 0] step:661/10000 train_time:52898ms step_avg:80.03ms
+[2025-09-05 19:33:28] [Rank 0] step:681/10000 train_time:53544ms step_avg:78.63ms
+[2025-09-05 19:33:28] [Rank 0] step:701/10000 train_time:54192ms step_avg:77.31ms
+[2025-09-05 19:33:29] [Rank 0] step:721/10000 train_time:54839ms step_avg:76.06ms
+[2025-09-05 19:33:30] [Rank 0] step:741/10000 train_time:55485ms step_avg:74.88ms
+[2025-09-05 19:33:30] [Rank 0] step:761/10000 train_time:56134ms step_avg:73.76ms
+[2025-09-05 19:33:31] [Rank 0] step:781/10000 train_time:56788ms step_avg:72.71ms
+[2025-09-05 19:33:32] [Rank 0] step:801/10000 train_time:57438ms step_avg:71.71ms
+[2025-09-05 19:33:33] [Rank 0] step:821/10000 train_time:58620ms step_avg:71.40ms
+[2025-09-05 19:33:34] [Rank 0] step:841/10000 train_time:59216ms step_avg:70.41ms
+[2025-09-05 19:33:34] [Rank 0] step:861/10000 train_time:59866ms step_avg:69.53ms
+[2025-09-05 19:33:35] [Rank 0] step:881/10000 train_time:60517ms step_avg:68.69ms
+[2025-09-05 19:33:35] [Rank 0] step:901/10000 train_time:61169ms step_avg:67.89ms
+[2025-09-05 19:33:36] [Rank 0] step:921/10000 train_time:61821ms step_avg:67.12ms
+[2025-09-05 19:33:37] [Rank 0] step:941/10000 train_time:62472ms step_avg:66.39ms
+[2025-09-05 19:33:37] [Rank 0] step:961/10000 train_time:63123ms step_avg:65.69ms
+[2025-09-05 19:33:38] [Rank 0] step:981/10000 train_time:63775ms step_avg:65.01ms
+[2025-09-05 19:33:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:33:39] [Rank 0] PRINT: step:1000/10000 train_loss:1.6497 val_loss:1.2536 train_time:64657ms step_avg:64.66ms
+[2025-09-05 19:33:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:33:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:35:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:35:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:35:00] [Rank 0] Total Loss: 3.9556
+[2025-09-05 19:35:00] [Rank 0] Total FTA (Unweighted): 0.3937
+[2025-09-05 19:35:00] [Rank 0] Total FTA (Weighted): 0.3937
+[2025-09-05 19:35:00] [Rank 0] Group 0 Loss: 3.4238
+[2025-09-05 19:35:00] [Rank 0] Group 1 Loss: 3.0351
+[2025-09-05 19:35:00] [Rank 0] Group 2 Loss: 3.1296
+[2025-09-05 19:35:00] [Rank 0] Group 3 Loss: 3.4285
+[2025-09-05 19:35:00] [Rank 0] Group 4 Loss: 3.5001
+[2025-09-05 19:35:00] [Rank 0] Group 5 Loss: 3.6326
+[2025-09-05 19:35:00] [Rank 0] Group 6 Loss: 3.7745
+[2025-09-05 19:35:00] [Rank 0] Group 7 Loss: 3.9053
+[2025-09-05 19:35:00] [Rank 0] Group 8 Loss: 4.1842
+[2025-09-05 19:35:00] [Rank 0] Group 9 Loss: 4.2430
+[2025-09-05 19:35:00] [Rank 0] Group 10 Loss: 4.3763
+[2025-09-05 19:35:00] [Rank 0] Group 11 Loss: 4.4712
+[2025-09-05 19:35:00] [Rank 0] Group 12 Loss: 4.4828
+[2025-09-05 19:35:00] [Rank 0] Group 13 Loss: 4.6084
+[2025-09-05 19:35:00] [Rank 0] Group 14 Loss: 4.5406
+[2025-09-05 19:35:00] [Rank 0] Group 15 Loss: 4.5540
+[2025-09-05 19:35:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:35:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:35:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:35:00] [Rank 0] Group 3 FTA: 0.9100
+[2025-09-05 19:35:00] [Rank 0] Group 4 FTA: 0.4700
+[2025-09-05 19:35:00] [Rank 0] Group 5 FTA: 0.4100
+[2025-09-05 19:35:00] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 19:35:00] [Rank 0] Group 7 FTA: 0.2800
+[2025-09-05 19:35:00] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 19:35:00] [Rank 0] Group 9 FTA: 0.0800
+[2025-09-05 19:35:00] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-05 19:35:00] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 19:35:00] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 19:35:00] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 19:35:00] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 19:35:00] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:35:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:35:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:35:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:35:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:35:02] [Rank 0] step:1001/10000 train_time:64664ms step_avg:64.60ms
+[2025-09-05 19:35:02] [Rank 0] step:1021/10000 train_time:65110ms step_avg:63.77ms
+[2025-09-05 19:35:03] [Rank 0] step:1041/10000 train_time:65862ms step_avg:63.27ms
+[2025-09-05 19:35:04] [Rank 0] step:1061/10000 train_time:66514ms step_avg:62.69ms
+[2025-09-05 19:35:04] [Rank 0] step:1081/10000 train_time:67166ms step_avg:62.13ms
+[2025-09-05 19:35:05] [Rank 0] step:1101/10000 train_time:67818ms step_avg:61.60ms
+[2025-09-05 19:35:06] [Rank 0] step:1121/10000 train_time:68470ms step_avg:61.08ms
+[2025-09-05 19:35:06] [Rank 0] step:1141/10000 train_time:69122ms step_avg:60.58ms
+[2025-09-05 19:35:07] [Rank 0] step:1161/10000 train_time:69777ms step_avg:60.10ms
+[2025-09-05 19:35:08] [Rank 0] step:1181/10000 train_time:70429ms step_avg:59.63ms
+[2025-09-05 19:35:08] [Rank 0] step:1201/10000 train_time:71081ms step_avg:59.18ms
+[2025-09-05 19:35:09] [Rank 0] step:1221/10000 train_time:71733ms step_avg:58.75ms
+[2025-09-05 19:35:10] [Rank 0] step:1241/10000 train_time:72386ms step_avg:58.33ms
+[2025-09-05 19:35:10] [Rank 0] step:1261/10000 train_time:73038ms step_avg:57.92ms
+[2025-09-05 19:35:11] [Rank 0] step:1281/10000 train_time:73689ms step_avg:57.52ms
+[2025-09-05 19:35:11] [Rank 0] step:1301/10000 train_time:74338ms step_avg:57.14ms
+[2025-09-05 19:35:12] [Rank 0] step:1321/10000 train_time:74990ms step_avg:56.77ms
+[2025-09-05 19:35:13] [Rank 0] step:1341/10000 train_time:75640ms step_avg:56.41ms
+[2025-09-05 19:35:13] [Rank 0] step:1361/10000 train_time:76291ms step_avg:56.06ms
+[2025-09-05 19:35:14] [Rank 0] step:1381/10000 train_time:76943ms step_avg:55.72ms
+[2025-09-05 19:35:15] [Rank 0] step:1401/10000 train_time:77595ms step_avg:55.39ms
+[2025-09-05 19:35:15] [Rank 0] step:1421/10000 train_time:78246ms step_avg:55.06ms
+[2025-09-05 19:35:16] [Rank 0] step:1441/10000 train_time:78899ms step_avg:54.75ms
+[2025-09-05 19:35:17] [Rank 0] step:1461/10000 train_time:79551ms step_avg:54.45ms
+[2025-09-05 19:35:17] [Rank 0] step:1481/10000 train_time:80203ms step_avg:54.15ms
+[2025-09-05 19:35:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:35:18] [Rank 0] PRINT: step:1500/10000 train_loss:1.1283 val_loss:1.0238 train_time:81088ms step_avg:54.06ms
+[2025-09-05 19:35:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:35:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:36:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:36:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:36:39] [Rank 0] Total Loss: 4.2139
+[2025-09-05 19:36:39] [Rank 0] Total FTA (Unweighted): 0.5356
+[2025-09-05 19:36:39] [Rank 0] Total FTA (Weighted): 0.5356
+[2025-09-05 19:36:39] [Rank 0] Group 0 Loss: 3.9185
+[2025-09-05 19:36:39] [Rank 0] Group 1 Loss: 3.5776
+[2025-09-05 19:36:39] [Rank 0] Group 2 Loss: 3.5845
+[2025-09-05 19:36:39] [Rank 0] Group 3 Loss: 3.8907
+[2025-09-05 19:36:39] [Rank 0] Group 4 Loss: 3.7882
+[2025-09-05 19:36:39] [Rank 0] Group 5 Loss: 3.8767
+[2025-09-05 19:36:39] [Rank 0] Group 6 Loss: 3.8364
+[2025-09-05 19:36:39] [Rank 0] Group 7 Loss: 3.9969
+[2025-09-05 19:36:39] [Rank 0] Group 8 Loss: 4.2362
+[2025-09-05 19:36:39] [Rank 0] Group 9 Loss: 4.3218
+[2025-09-05 19:36:39] [Rank 0] Group 10 Loss: 4.5325
+[2025-09-05 19:36:39] [Rank 0] Group 11 Loss: 4.5668
+[2025-09-05 19:36:39] [Rank 0] Group 12 Loss: 4.7141
+[2025-09-05 19:36:39] [Rank 0] Group 13 Loss: 4.8407
+[2025-09-05 19:36:39] [Rank 0] Group 14 Loss: 4.8461
+[2025-09-05 19:36:39] [Rank 0] Group 15 Loss: 4.8942
+[2025-09-05 19:36:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:36:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:36:39] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:36:39] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:36:39] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:36:39] [Rank 0] Group 5 FTA: 0.7700
+[2025-09-05 19:36:39] [Rank 0] Group 6 FTA: 0.6000
+[2025-09-05 19:36:39] [Rank 0] Group 7 FTA: 0.5400
+[2025-09-05 19:36:39] [Rank 0] Group 8 FTA: 0.5500
+[2025-09-05 19:36:39] [Rank 0] Group 9 FTA: 0.3100
+[2025-09-05 19:36:39] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-05 19:36:39] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 19:36:39] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 19:36:39] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 19:36:39] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 19:36:39] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:36:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:36:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:36:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:36:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:36:40] [Rank 0] step:1501/10000 train_time:81095ms step_avg:54.03ms
+[2025-09-05 19:36:41] [Rank 0] step:1521/10000 train_time:81543ms step_avg:53.61ms
+[2025-09-05 19:36:42] [Rank 0] step:1541/10000 train_time:82193ms step_avg:53.34ms
+[2025-09-05 19:36:42] [Rank 0] step:1561/10000 train_time:82844ms step_avg:53.07ms
+[2025-09-05 19:36:43] [Rank 0] step:1581/10000 train_time:83496ms step_avg:52.81ms
+[2025-09-05 19:36:44] [Rank 0] step:1601/10000 train_time:84148ms step_avg:52.56ms
+[2025-09-05 19:36:44] [Rank 0] step:1621/10000 train_time:84800ms step_avg:52.31ms
+[2025-09-05 19:36:45] [Rank 0] step:1641/10000 train_time:85451ms step_avg:52.07ms
+[2025-09-05 19:36:46] [Rank 0] step:1661/10000 train_time:86103ms step_avg:51.84ms
+[2025-09-05 19:36:46] [Rank 0] step:1681/10000 train_time:86755ms step_avg:51.61ms
+[2025-09-05 19:36:47] [Rank 0] step:1701/10000 train_time:87406ms step_avg:51.39ms
+[2025-09-05 19:36:48] [Rank 0] step:1721/10000 train_time:88058ms step_avg:51.17ms
+[2025-09-05 19:36:48] [Rank 0] step:1741/10000 train_time:88710ms step_avg:50.95ms
+[2025-09-05 19:36:49] [Rank 0] step:1761/10000 train_time:89362ms step_avg:50.74ms
+[2025-09-05 19:36:50] [Rank 0] step:1781/10000 train_time:90214ms step_avg:50.65ms
+[2025-09-05 19:36:50] [Rank 0] step:1801/10000 train_time:90869ms step_avg:50.45ms
+[2025-09-05 19:36:51] [Rank 0] step:1821/10000 train_time:91521ms step_avg:50.26ms
+[2025-09-05 19:36:52] [Rank 0] step:1841/10000 train_time:92342ms step_avg:50.16ms
+[2025-09-05 19:36:53] [Rank 0] step:1861/10000 train_time:92995ms step_avg:49.97ms
+[2025-09-05 19:36:53] [Rank 0] step:1881/10000 train_time:93647ms step_avg:49.79ms
+[2025-09-05 19:36:54] [Rank 0] step:1901/10000 train_time:94299ms step_avg:49.61ms
+[2025-09-05 19:36:55] [Rank 0] step:1921/10000 train_time:94952ms step_avg:49.43ms
+[2025-09-05 19:36:55] [Rank 0] step:1941/10000 train_time:95604ms step_avg:49.25ms
+[2025-09-05 19:36:56] [Rank 0] step:1961/10000 train_time:96255ms step_avg:49.08ms
+[2025-09-05 19:36:57] [Rank 0] step:1981/10000 train_time:96906ms step_avg:48.92ms
+[2025-09-05 19:36:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:36:58] [Rank 0] PRINT: step:2000/10000 train_loss:0.9768 val_loss:0.9243 train_time:97789ms step_avg:48.89ms
+[2025-09-05 19:36:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:36:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:38:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:38:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:38:19] [Rank 0] Total Loss: 4.6293
+[2025-09-05 19:38:19] [Rank 0] Total FTA (Unweighted): 0.6119
+[2025-09-05 19:38:19] [Rank 0] Total FTA (Weighted): 0.6119
+[2025-09-05 19:38:19] [Rank 0] Group 0 Loss: 4.4734
+[2025-09-05 19:38:19] [Rank 0] Group 1 Loss: 4.0827
+[2025-09-05 19:38:19] [Rank 0] Group 2 Loss: 4.0676
+[2025-09-05 19:38:19] [Rank 0] Group 3 Loss: 4.3657
+[2025-09-05 19:38:19] [Rank 0] Group 4 Loss: 4.2617
+[2025-09-05 19:38:19] [Rank 0] Group 5 Loss: 4.3294
+[2025-09-05 19:38:19] [Rank 0] Group 6 Loss: 4.2878
+[2025-09-05 19:38:19] [Rank 0] Group 7 Loss: 4.3828
+[2025-09-05 19:38:19] [Rank 0] Group 8 Loss: 4.5600
+[2025-09-05 19:38:19] [Rank 0] Group 9 Loss: 4.5769
+[2025-09-05 19:38:19] [Rank 0] Group 10 Loss: 4.9089
+[2025-09-05 19:38:19] [Rank 0] Group 11 Loss: 4.9636
+[2025-09-05 19:38:19] [Rank 0] Group 12 Loss: 5.0229
+[2025-09-05 19:38:19] [Rank 0] Group 13 Loss: 5.2141
+[2025-09-05 19:38:19] [Rank 0] Group 14 Loss: 5.2494
+[2025-09-05 19:38:19] [Rank 0] Group 15 Loss: 5.3227
+[2025-09-05 19:38:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:38:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:38:19] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:38:19] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:38:19] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:38:19] [Rank 0] Group 5 FTA: 0.9900
+[2025-09-05 19:38:19] [Rank 0] Group 6 FTA: 0.8100
+[2025-09-05 19:38:19] [Rank 0] Group 7 FTA: 0.6600
+[2025-09-05 19:38:19] [Rank 0] Group 8 FTA: 0.7000
+[2025-09-05 19:38:19] [Rank 0] Group 9 FTA: 0.5000
+[2025-09-05 19:38:19] [Rank 0] Group 10 FTA: 0.3800
+[2025-09-05 19:38:19] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 19:38:19] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 19:38:19] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 19:38:19] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-05 19:38:19] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:38:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:38:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:38:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:38:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:38:20] [Rank 0] step:2001/10000 train_time:97796ms step_avg:48.87ms
+[2025-09-05 19:38:21] [Rank 0] step:2021/10000 train_time:98444ms step_avg:48.71ms
+[2025-09-05 19:38:22] [Rank 0] step:2041/10000 train_time:99095ms step_avg:48.55ms
+[2025-09-05 19:38:22] [Rank 0] step:2061/10000 train_time:99747ms step_avg:48.40ms
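
Editor's note: the step-2000 eval block above reports identical unweighted and weighted total FTA (0.6119). With 1600 fixed-eval samples over 16 groups, equal group sizes (100 each) would force the two averages to coincide, and the plain mean of the 16 per-group FTAs does reproduce the reported total. A small Python check (equal group sizes are an assumption suggested by the numbers, not stated in the log):

# Unweighted mean of the 16 per-group FTAs from the step-2000 eval:
# sum = 9.79, 9.79 / 16 = 0.611875 -> prints as 0.6119.
group_fta = [1.00, 1.00, 1.00, 1.00, 1.00, 0.99, 0.81, 0.66,
             0.70, 0.50, 0.38, 0.19, 0.16, 0.11, 0.19, 0.10]
print(f"{sum(group_fta) / len(group_fta):.4f}")  # 0.6119
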
+[2025-09-05 19:38:23] [Rank 0] step:2081/10000 train_time:100399ms step_avg:48.25ms
+[2025-09-05 19:38:24] [Rank 0] step:2101/10000 train_time:101050ms step_avg:48.10ms
+[2025-09-05 19:38:24] [Rank 0] step:2121/10000 train_time:101702ms step_avg:47.95ms
+[2025-09-05 19:38:25] [Rank 0] step:2141/10000 train_time:102355ms step_avg:47.81ms
+[2025-09-05 19:38:26] [Rank 0] step:2161/10000 train_time:103007ms step_avg:47.67ms
+[2025-09-05 19:38:26] [Rank 0] step:2181/10000 train_time:103658ms step_avg:47.53ms
+[2025-09-05 19:38:27] [Rank 0] step:2201/10000 train_time:104310ms step_avg:47.39ms
+[2025-09-05 19:38:28] [Rank 0] step:2221/10000 train_time:104961ms step_avg:47.26ms
+[2025-09-05 19:38:28] [Rank 0] step:2241/10000 train_time:105615ms step_avg:47.13ms
+[2025-09-05 19:38:29] [Rank 0] step:2261/10000 train_time:106273ms step_avg:47.00ms
+[2025-09-05 19:38:30] [Rank 0] step:2281/10000 train_time:106931ms step_avg:46.88ms
+[2025-09-05 19:38:30] [Rank 0] step:2301/10000 train_time:107589ms step_avg:46.76ms
+[2025-09-05 19:38:31] [Rank 0] step:2321/10000 train_time:108247ms step_avg:46.64ms
+[2025-09-05 19:38:32] [Rank 0] step:2341/10000 train_time:108906ms step_avg:46.52ms
+[2025-09-05 19:38:32] [Rank 0] step:2361/10000 train_time:109564ms step_avg:46.41ms
+[2025-09-05 19:38:33] [Rank 0] step:2381/10000 train_time:110222ms step_avg:46.29ms
+[2025-09-05 19:38:34] [Rank 0] step:2401/10000 train_time:110879ms step_avg:46.18ms
+[2025-09-05 19:38:34] [Rank 0] step:2421/10000 train_time:111537ms step_avg:46.07ms
+[2025-09-05 19:38:35] [Rank 0] step:2441/10000 train_time:112194ms step_avg:45.96ms
+[2025-09-05 19:38:36] [Rank 0] step:2461/10000 train_time:112852ms step_avg:45.86ms
+[2025-09-05 19:38:36] [Rank 0] step:2481/10000 train_time:113510ms step_avg:45.75ms
+[2025-09-05 19:38:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:38:37] [Rank 0] PRINT: step:2500/10000 train_loss:0.8995 val_loss:0.8601 train_time:114403ms step_avg:45.76ms
+[2025-09-05 19:38:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:38:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:39:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:39:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:39:58] [Rank 0] Total Loss: 4.7794
+[2025-09-05 19:39:58] [Rank 0] Total FTA (Unweighted): 0.6675
+[2025-09-05 19:39:58] [Rank 0] Total FTA (Weighted): 0.6675
+[2025-09-05 19:39:58] [Rank 0] Group 0 Loss: 4.7986
+[2025-09-05 19:39:58] [Rank 0] Group 1 Loss: 4.1557
+[2025-09-05 19:39:58] [Rank 0] Group 2 Loss: 4.2641
+[2025-09-05 19:39:58] [Rank 0] Group 3 Loss: 4.6398
+[2025-09-05 19:39:58] [Rank 0] Group 4 Loss: 4.5142
+[2025-09-05 19:39:58] [Rank 0] Group 5 Loss: 4.5464
+[2025-09-05 19:39:58] [Rank 0] Group 6 Loss: 4.4566
+[2025-09-05 19:39:58] [Rank 0] Group 7 Loss: 4.5201
+[2025-09-05 19:39:58] [Rank 0] Group 8 Loss: 4.7140
+[2025-09-05 19:39:59] [Rank 0] Group 9 Loss: 4.7317
+[2025-09-05 19:39:59] [Rank 0] Group 10 Loss: 4.9467
+[2025-09-05 19:39:59] [Rank 0] Group 11 Loss: 4.9892
+[2025-09-05 19:39:59] [Rank 0] Group 12 Loss: 5.1034
+[2025-09-05 19:39:59] [Rank 0] Group 13 Loss: 5.2885
+[2025-09-05 19:39:59] [Rank 0] Group 14 Loss: 5.3810
+[2025-09-05 19:39:59] [Rank 0] Group 15 Loss: 5.4205
+[2025-09-05 19:39:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:39:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:39:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:39:59] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:39:59] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:39:59] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:39:59] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:39:59] [Rank 0] Group 7 FTA: 0.8200
+[2025-09-05 19:39:59] [Rank 0] Group 8 FTA: 0.7800
+[2025-09-05 19:39:59] [Rank 0] Group 9 FTA: 0.6700
+[2025-09-05 19:39:59] [Rank 0] Group 10 FTA: 0.5800
+[2025-09-05 19:39:59] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 19:39:59] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 19:39:59] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 19:39:59] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 19:39:59] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:39:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:40:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:40:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:40:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:40:00] [Rank 0] step:2501/10000 train_time:114411ms step_avg:45.75ms
+[2025-09-05 19:40:01] [Rank 0] step:2521/10000 train_time:114855ms step_avg:45.56ms
+[2025-09-05 19:40:02] [Rank 0] step:2541/10000 train_time:115616ms step_avg:45.50ms
+[2025-09-05 19:40:02] [Rank 0] step:2561/10000 train_time:116276ms step_avg:45.40ms
+[2025-09-05 19:40:03] [Rank 0] step:2581/10000 train_time:116933ms step_avg:45.31ms
+[2025-09-05 19:40:04] [Rank 0] step:2601/10000 train_time:117591ms step_avg:45.21ms
+[2025-09-05 19:40:04] [Rank 0] step:2621/10000 train_time:118248ms step_avg:45.12ms
+[2025-09-05 19:40:05] [Rank 0] step:2641/10000 train_time:118905ms step_avg:45.02ms
+[2025-09-05 19:40:05] [Rank 0] step:2661/10000 train_time:119563ms step_avg:44.93ms
+[2025-09-05 19:40:06] [Rank 0] step:2681/10000 train_time:120221ms step_avg:44.84ms
+[2025-09-05 19:40:07] [Rank 0] step:2701/10000 train_time:120879ms step_avg:44.75ms
+[2025-09-05 19:40:07] [Rank 0] step:2721/10000 train_time:121536ms step_avg:44.67ms
+[2025-09-05 19:40:08] [Rank 0] step:2741/10000 train_time:122193ms step_avg:44.58ms
+[2025-09-05 19:40:09] [Rank 0] step:2761/10000 train_time:122851ms step_avg:44.50ms
+[2025-09-05 19:40:09] [Rank 0] step:2781/10000 train_time:123508ms step_avg:44.41ms
+[2025-09-05 19:40:10] [Rank 0] step:2801/10000 train_time:124165ms step_avg:44.33ms
+[2025-09-05 19:40:11] [Rank 0] step:2821/10000 train_time:124824ms step_avg:44.25ms
+[2025-09-05 19:40:12] [Rank 0] step:2841/10000 train_time:125957ms step_avg:44.34ms
+[2025-09-05 19:40:13] [Rank 0] step:2861/10000 train_time:126614ms step_avg:44.26ms
+[2025-09-05 19:40:13] [Rank 0] step:2881/10000 train_time:127271ms step_avg:44.18ms
+[2025-09-05 19:40:14] [Rank 0] step:2901/10000 train_time:127928ms step_avg:44.10ms
+[2025-09-05 19:40:15] [Rank 0] step:2921/10000 train_time:128585ms step_avg:44.02ms
+[2025-09-05 19:40:15] [Rank 0] step:2941/10000 train_time:129242ms step_avg:43.95ms
+[2025-09-05 19:40:16] [Rank 0] step:2961/10000 train_time:129898ms step_avg:43.87ms
+[2025-09-05 19:40:16] [Rank 0] step:2981/10000 train_time:130556ms step_avg:43.80ms
+[2025-09-05 19:40:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:40:18] [Rank 0] PRINT: step:3000/10000 train_loss:0.8483 val_loss:0.8190 train_time:131447ms step_avg:43.82ms
+[2025-09-05 19:40:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:40:18] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:41:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:41:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:41:39] [Rank 0] Total Loss: 4.8124
+[2025-09-05 19:41:39] [Rank 0] Total FTA (Unweighted): 0.7106
+[2025-09-05 19:41:39] [Rank 0] Total FTA (Weighted): 0.7106
+[2025-09-05 19:41:39] [Rank 0] Group 0 Loss: 4.5924
+[2025-09-05 19:41:39] [Rank 0] Group 1 Loss: 4.3512
+[2025-09-05 19:41:39] [Rank 0] Group 2 Loss: 4.2496
+[2025-09-05 19:41:39] [Rank 0] Group 3 Loss: 4.6721
+[2025-09-05 19:41:39] [Rank 0] Group 4 Loss: 4.6031
+[2025-09-05 19:41:39] [Rank 0] Group 5 Loss: 4.6388
+[2025-09-05 19:41:39] [Rank 0] Group 6 Loss: 4.5652
+[2025-09-05 19:41:39] [Rank 0] Group 7 Loss: 4.5617
+[2025-09-05 19:41:39] [Rank 0] Group 8 Loss: 4.7737
+[2025-09-05 19:41:39] [Rank 0] Group 9 Loss: 4.7735
+[2025-09-05 19:41:39] [Rank 0] Group 10 Loss: 4.9428
+[2025-09-05 19:41:39] [Rank 0] Group 11 Loss: 5.0365
+[2025-09-05 19:41:39] [Rank 0] Group 12 Loss: 5.0928
+[2025-09-05 19:41:39] [Rank 0] Group 13 Loss: 5.2793
+[2025-09-05 19:41:39] [Rank 0] Group 14 Loss: 5.4114
+[2025-09-05 19:41:39] [Rank 0] Group 15 Loss: 5.4548
+[2025-09-05 19:41:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:41:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:41:39] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:41:39] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:41:39] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:41:39] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:41:39] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:41:39] [Rank 0] Group 7 FTA: 0.9800
+[2025-09-05 19:41:39] [Rank 0] Group 8 FTA: 0.8300
+[2025-09-05 19:41:39] [Rank 0] Group 9 FTA: 0.7500
+[2025-09-05 19:41:39] [Rank 0] Group 10 FTA: 0.7200
+[2025-09-05 19:41:39] [Rank 0] Group 11 FTA: 0.5200
+[2025-09-05 19:41:39] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 19:41:39] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 19:41:39] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 19:41:39] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:41:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:41:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:41:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:41:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:41:40] [Rank 0] step:3001/10000 train_time:131454ms step_avg:43.80ms
+[2025-09-05 19:41:41] [Rank 0] step:3021/10000 train_time:131895ms step_avg:43.66ms
+[2025-09-05 19:41:42] [Rank 0] step:3041/10000 train_time:132553ms step_avg:43.59ms
+[2025-09-05 19:41:42] [Rank 0] step:3061/10000 train_time:133208ms step_avg:43.52ms
+[2025-09-05 19:41:43] [Rank 0] step:3081/10000 train_time:133865ms step_avg:43.45ms
+[2025-09-05 19:41:44] [Rank 0] step:3101/10000 train_time:134522ms step_avg:43.38ms
+[2025-09-05 19:41:44] [Rank 0] step:3121/10000 train_time:135180ms step_avg:43.31ms
+[2025-09-05 19:41:45] [Rank 0] step:3141/10000 train_time:135838ms step_avg:43.25ms
+[2025-09-05 19:41:46] [Rank 0] step:3161/10000 train_time:136495ms step_avg:43.18ms
+[2025-09-05 19:41:46] [Rank 0] step:3181/10000 train_time:137153ms step_avg:43.12ms
+[2025-09-05 19:41:47] [Rank 0] step:3201/10000 train_time:137811ms step_avg:43.05ms
+[2025-09-05 19:41:47] [Rank 0] step:3221/10000 train_time:138469ms step_avg:42.99ms
+[2025-09-05 19:41:48] [Rank 0] step:3241/10000 train_time:139126ms step_avg:42.93ms
+[2025-09-05 19:41:49] [Rank 0] step:3261/10000 train_time:139786ms step_avg:42.87ms
+[2025-09-05 19:41:49] [Rank 0] step:3281/10000 train_time:140443ms step_avg:42.80ms
+[2025-09-05 19:41:50] [Rank 0] step:3301/10000 train_time:141100ms step_avg:42.74ms
+[2025-09-05 19:41:51] [Rank 0] step:3321/10000 train_time:141757ms step_avg:42.69ms
+[2025-09-05 19:41:51] [Rank 0] step:3341/10000 train_time:142414ms step_avg:42.63ms
+[2025-09-05 19:41:52] [Rank 0] step:3361/10000 train_time:143072ms step_avg:42.57ms
+[2025-09-05 19:41:53] [Rank 0] step:3381/10000 train_time:143730ms step_avg:42.51ms
+[2025-09-05 19:41:53] [Rank 0] step:3401/10000 train_time:144388ms step_avg:42.45ms
+[2025-09-05 19:41:54] [Rank 0] step:3421/10000 train_time:145046ms step_avg:42.40ms
+[2025-09-05 19:41:55] [Rank 0] step:3441/10000 train_time:145704ms step_avg:42.34ms
+[2025-09-05 19:41:55] [Rank 0] step:3461/10000 train_time:146362ms step_avg:42.29ms
+[2025-09-05 19:41:56] [Rank 0] step:3481/10000 train_time:147020ms step_avg:42.24ms
+[2025-09-05 19:41:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
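
Editor's note: the recurring divisibility warning is a plain remainder issue: 491520 is not an integer multiple of 65536, so a loader that only consumes full validation batches would drop the final partial batch. A worked check in Python (the drop-partial-batch behaviour is an assumption suggested by the warning text, not confirmed by the log):

# 491520 / 65536 = 7.5, i.e. 7 full val batches plus half a batch.
val_tokens = 491520
val_batch_size = 65536
full_batches, leftover = divmod(val_tokens, val_batch_size)
print(full_batches, leftover)         # 7 32768
print(full_batches * val_batch_size)  # 458752 tokens actually evaluated
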
+[2025-09-05 19:41:57] [Rank 0] PRINT: step:3500/10000 train_loss:0.8116 val_loss:0.7871 train_time:147910ms step_avg:42.26ms
+[2025-09-05 19:41:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:41:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:43:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:43:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:43:18] [Rank 0] Total Loss: 4.8876
+[2025-09-05 19:43:18] [Rank 0] Total FTA (Unweighted): 0.7288
+[2025-09-05 19:43:18] [Rank 0] Total FTA (Weighted): 0.7288
+[2025-09-05 19:43:18] [Rank 0] Group 0 Loss: 4.7369
+[2025-09-05 19:43:18] [Rank 0] Group 1 Loss: 4.3906
+[2025-09-05 19:43:18] [Rank 0] Group 2 Loss: 4.3568
+[2025-09-05 19:43:18] [Rank 0] Group 3 Loss: 4.7808
+[2025-09-05 19:43:18] [Rank 0] Group 4 Loss: 4.7798
+[2025-09-05 19:43:18] [Rank 0] Group 5 Loss: 4.7359
+[2025-09-05 19:43:18] [Rank 0] Group 6 Loss: 4.7171
+[2025-09-05 19:43:18] [Rank 0] Group 7 Loss: 4.6876
+[2025-09-05 19:43:18] [Rank 0] Group 8 Loss: 4.8445
+[2025-09-05 19:43:18] [Rank 0] Group 9 Loss: 4.8403
+[2025-09-05 19:43:18] [Rank 0] Group 10 Loss: 5.0432
+[2025-09-05 19:43:18] [Rank 0] Group 11 Loss: 5.0484
+[2025-09-05 19:43:18] [Rank 0] Group 12 Loss: 5.1624
+[2025-09-05 19:43:18] [Rank 0] Group 13 Loss: 5.2599
+[2025-09-05 19:43:19] [Rank 0] Group 14 Loss: 5.3818
+[2025-09-05 19:43:19] [Rank 0] Group 15 Loss: 5.4361
+[2025-09-05 19:43:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:43:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:43:19] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:43:19] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:43:19] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:43:19] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:43:19] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:43:19] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:43:19] [Rank 0] Group 8 FTA: 0.9200
+[2025-09-05 19:43:19] [Rank 0] Group 9 FTA: 0.7700
+[2025-09-05 19:43:19] [Rank 0] Group 10 FTA: 0.7800
+[2025-09-05 19:43:19] [Rank 0] Group 11 FTA: 0.5700
+[2025-09-05 19:43:19] [Rank 0] Group 12 FTA: 0.2600
+[2025-09-05 19:43:19] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 19:43:19] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 19:43:19] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:43:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:43:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:43:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:43:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:43:20] [Rank 0] step:3501/10000 train_time:147917ms step_avg:42.25ms
+[2025-09-05 19:43:21] [Rank 0] step:3521/10000 train_time:148350ms step_avg:42.13ms
+[2025-09-05 19:43:21] [Rank 0] step:3541/10000 train_time:149009ms step_avg:42.08ms
+[2025-09-05 19:43:22] [Rank 0] step:3561/10000 train_time:149663ms step_avg:42.03ms
+[2025-09-05 19:43:23] [Rank 0] step:3581/10000 train_time:150321ms step_avg:41.98ms
+[2025-09-05 19:43:23] [Rank 0] step:3601/10000 train_time:150979ms step_avg:41.93ms
+[2025-09-05 19:43:24] [Rank 0] step:3621/10000 train_time:151637ms step_avg:41.88ms
+[2025-09-05 19:43:25] [Rank 0] step:3641/10000 train_time:152293ms step_avg:41.83ms
+[2025-09-05 19:43:25] [Rank 0] step:3661/10000 train_time:152950ms step_avg:41.78ms
+[2025-09-05 19:43:26] [Rank 0] step:3681/10000 train_time:153607ms step_avg:41.73ms
+[2025-09-05 19:43:27] [Rank 0] step:3701/10000 train_time:154264ms step_avg:41.68ms
+[2025-09-05 19:43:27] [Rank 0] step:3721/10000 train_time:154922ms step_avg:41.63ms
+[2025-09-05 19:43:28] [Rank 0] step:3741/10000 train_time:155578ms step_avg:41.59ms
+[2025-09-05 19:43:29] [Rank 0] step:3761/10000 train_time:156236ms step_avg:41.54ms
+[2025-09-05 19:43:29] [Rank 0] step:3781/10000 train_time:156893ms step_avg:41.50ms
+[2025-09-05 19:43:30] [Rank 0] step:3801/10000 train_time:157550ms step_avg:41.45ms
+[2025-09-05 19:43:31] [Rank 0] step:3821/10000 train_time:158209ms step_avg:41.41ms
+[2025-09-05 19:43:31] [Rank 0] step:3841/10000 train_time:158863ms step_avg:41.36ms
+[2025-09-05 19:43:32] [Rank 0] step:3861/10000 train_time:159520ms step_avg:41.32ms
+[2025-09-05 19:43:32] [Rank 0] step:3881/10000 train_time:160176ms step_avg:41.27ms
+[2025-09-05 19:43:33] [Rank 0] step:3901/10000 train_time:160833ms step_avg:41.23ms
+[2025-09-05 19:43:34] [Rank 0] step:3921/10000 train_time:161490ms step_avg:41.19ms
+[2025-09-05 19:43:34] [Rank 0] step:3941/10000 train_time:162146ms step_avg:41.14ms
+[2025-09-05 19:43:35] [Rank 0] step:3961/10000 train_time:162802ms step_avg:41.10ms
+[2025-09-05 19:43:36] [Rank 0] step:3981/10000 train_time:163460ms step_avg:41.06ms
+[2025-09-05 19:43:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:43:37] [Rank 0] PRINT: step:4000/10000 train_loss:0.7835 val_loss:0.7624 train_time:164350ms step_avg:41.09ms
+[2025-09-05 19:43:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:43:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:44:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:44:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:44:58] [Rank 0] Total Loss: 4.8363
+[2025-09-05 19:44:58] [Rank 0] Total FTA (Unweighted): 0.7619
+[2025-09-05 19:44:58] [Rank 0] Total FTA (Weighted): 0.7619
+[2025-09-05 19:44:58] [Rank 0] Group 0 Loss: 4.8047
+[2025-09-05 19:44:58] [Rank 0] Group 1 Loss: 4.2075
+[2025-09-05 19:44:58] [Rank 0] Group 2 Loss: 4.3952
+[2025-09-05 19:44:58] [Rank 0] Group 3 Loss: 4.7540
+[2025-09-05 19:44:58] [Rank 0] Group 4 Loss: 4.6559
+[2025-09-05 19:44:58] [Rank 0] Group 5 Loss: 4.6350
+[2025-09-05 19:44:58] [Rank 0] Group 6 Loss: 4.6264
+[2025-09-05 19:44:58] [Rank 0] Group 7 Loss: 4.7013
+[2025-09-05 19:44:58] [Rank 0] Group 8 Loss: 4.8200
+[2025-09-05 19:44:58] [Rank 0] Group 9 Loss: 4.8187
+[2025-09-05 19:44:58] [Rank 0] Group 10 Loss: 4.9668
+[2025-09-05 19:44:58] [Rank 0] Group 11 Loss: 5.0045
+[2025-09-05 19:44:58] [Rank 0] Group 12 Loss: 5.0617
+[2025-09-05 19:44:58] [Rank 0] Group 13 Loss: 5.2179
+[2025-09-05 19:44:58] [Rank 0] Group 14 Loss: 5.2832
+[2025-09-05 19:44:58] [Rank 0] Group 15 Loss: 5.4274
+[2025-09-05 19:44:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:44:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:44:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:44:58] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:44:58] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:44:58] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:44:58] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:44:58] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:44:58] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-05 19:44:58] [Rank 0] Group 9 FTA: 0.8500
+[2025-09-05 19:44:58] [Rank 0] Group 10 FTA: 0.8900
+[2025-09-05 19:44:58] [Rank 0] Group 11 FTA: 0.7100
+[2025-09-05 19:44:58] [Rank 0] Group 12 FTA: 0.4000
+[2025-09-05 19:44:58] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 19:44:58] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 19:44:58] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 19:44:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:44:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:44:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:45:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:45:00] [Rank 0] step:4001/10000 train_time:164358ms step_avg:41.08ms
+[2025-09-05 19:45:00] [Rank 0] step:4021/10000 train_time:164809ms step_avg:40.99ms
+[2025-09-05 19:45:01] [Rank 0] step:4041/10000 train_time:165467ms step_avg:40.95ms
+[2025-09-05 19:45:02] [Rank 0] step:4061/10000 train_time:166131ms step_avg:40.91ms
+[2025-09-05 19:45:02] [Rank 0] step:4081/10000 train_time:166789ms step_avg:40.87ms
+[2025-09-05 19:45:03] [Rank 0] step:4101/10000 train_time:167446ms step_avg:40.83ms
+[2025-09-05 19:45:04] [Rank 0] step:4121/10000 train_time:168104ms step_avg:40.79ms
+[2025-09-05 19:45:04] [Rank 0] step:4141/10000 train_time:168762ms step_avg:40.75ms
+[2025-09-05 19:45:05] [Rank 0] step:4161/10000 train_time:169419ms step_avg:40.72ms
+[2025-09-05 19:45:06] [Rank 0] step:4181/10000 train_time:170078ms step_avg:40.68ms
+[2025-09-05 19:45:06] [Rank 0] step:4201/10000 train_time:170736ms step_avg:40.64ms
+[2025-09-05 19:45:07] [Rank 0] step:4221/10000 train_time:171394ms step_avg:40.61ms
+[2025-09-05 19:45:08] [Rank 0] step:4241/10000 train_time:172051ms step_avg:40.57ms
+[2025-09-05 19:45:08] [Rank 0] step:4261/10000 train_time:172709ms step_avg:40.53ms
+[2025-09-05 19:45:09] [Rank 0] step:4281/10000 train_time:173367ms step_avg:40.50ms
+[2025-09-05 19:45:10] [Rank 0] step:4301/10000 train_time:174026ms step_avg:40.46ms
+[2025-09-05 19:45:10] [Rank 0] step:4321/10000 train_time:174684ms step_avg:40.43ms
+[2025-09-05 19:45:11] [Rank 0] step:4341/10000 train_time:175342ms step_avg:40.39ms
+[2025-09-05 19:45:12] [Rank 0] step:4361/10000 train_time:176001ms step_avg:40.36ms
+[2025-09-05 19:45:12] [Rank 0] step:4381/10000 train_time:176660ms step_avg:40.32ms
+[2025-09-05 19:45:13] [Rank 0] step:4401/10000 train_time:177318ms step_avg:40.29ms
+[2025-09-05 19:45:14] [Rank 0] step:4421/10000 train_time:177977ms step_avg:40.26ms
+[2025-09-05 19:45:14] [Rank 0] step:4441/10000 train_time:178636ms step_avg:40.22ms
+[2025-09-05 19:45:15] [Rank 0] step:4461/10000 train_time:179416ms step_avg:40.22ms
+[2025-09-05 19:45:16] [Rank 0] step:4481/10000 train_time:180078ms step_avg:40.19ms
+[2025-09-05 19:45:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:45:17] [Rank 0] PRINT: step:4500/10000 train_loss:0.7606 val_loss:0.7420 train_time:180969ms step_avg:40.22ms
+[2025-09-05 19:45:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:45:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:46:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:46:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:46:38] [Rank 0] Total Loss: 4.8751
+[2025-09-05 19:46:38] [Rank 0] Total FTA (Unweighted): 0.7844
+[2025-09-05 19:46:38] [Rank 0] Total FTA (Weighted): 0.7844
+[2025-09-05 19:46:38] [Rank 0] Group 0 Loss: 4.8044
+[2025-09-05 19:46:38] [Rank 0] Group 1 Loss: 4.0696
+[2025-09-05 19:46:38] [Rank 0] Group 2 Loss: 4.5274
+[2025-09-05 19:46:38] [Rank 0] Group 3 Loss: 4.8105
+[2025-09-05 19:46:38] [Rank 0] Group 4 Loss: 4.7233
+[2025-09-05 19:46:38] [Rank 0] Group 5 Loss: 4.7512
+[2025-09-05 19:46:38] [Rank 0] Group 6 Loss: 4.7098
+[2025-09-05 19:46:38] [Rank 0] Group 7 Loss: 4.7735
+[2025-09-05 19:46:38] [Rank 0] Group 8 Loss: 4.8715
+[2025-09-05 19:46:38] [Rank 0] Group 9 Loss: 4.9736
+[2025-09-05 19:46:38] [Rank 0] Group 10 Loss: 5.0652
+[2025-09-05 19:46:38] [Rank 0] Group 11 Loss: 5.0475
+[2025-09-05 19:46:38] [Rank 0] Group 12 Loss: 5.0156
+[2025-09-05 19:46:38] [Rank 0] Group 13 Loss: 5.1905
+[2025-09-05 19:46:38] [Rank 0] Group 14 Loss: 5.2902
+[2025-09-05 19:46:38] [Rank 0] Group 15 Loss: 5.3774
+[2025-09-05 19:46:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:46:38] [Rank 0] Group 9 FTA: 0.9100
+[2025-09-05 19:46:38] [Rank 0] Group 10 FTA: 0.9100
+[2025-09-05 19:46:38] [Rank 0] Group 11 FTA: 0.8000
+[2025-09-05 19:46:38] [Rank 0] Group 12 FTA: 0.5000
+[2025-09-05 19:46:38] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 19:46:38] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 19:46:38] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:46:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:46:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:46:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:46:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:46:40] [Rank 0] step:4501/10000 train_time:180977ms step_avg:40.21ms
+[2025-09-05 19:46:40] [Rank 0] step:4521/10000 train_time:181425ms step_avg:40.13ms
+[2025-09-05 19:46:41] [Rank 0] step:4541/10000 train_time:182082ms step_avg:40.10ms
+[2025-09-05 19:46:42] [Rank 0] step:4561/10000 train_time:182741ms step_avg:40.07ms
+[2025-09-05 19:46:42] [Rank 0] step:4581/10000 train_time:183397ms step_avg:40.03ms
+[2025-09-05 19:46:43] [Rank 0] step:4601/10000 train_time:184055ms step_avg:40.00ms
19:46:43] [Rank 0] step:4601/10000 train_time:184055ms step_avg:40.00ms +[2025-09-05 19:46:44] [Rank 0] step:4621/10000 train_time:184712ms step_avg:39.97ms +[2025-09-05 19:46:44] [Rank 0] step:4621/10000 train_time:184712ms step_avg:39.97ms +[2025-09-05 19:46:44] [Rank 0] step:4641/10000 train_time:185370ms step_avg:39.94ms +[2025-09-05 19:46:44] [Rank 0] step:4641/10000 train_time:185370ms step_avg:39.94ms +[2025-09-05 19:46:45] [Rank 0] step:4661/10000 train_time:186028ms step_avg:39.91ms +[2025-09-05 19:46:45] [Rank 0] step:4661/10000 train_time:186028ms step_avg:39.91ms +[2025-09-05 19:46:46] [Rank 0] step:4681/10000 train_time:186685ms step_avg:39.88ms +[2025-09-05 19:46:46] [Rank 0] step:4681/10000 train_time:186685ms step_avg:39.88ms +[2025-09-05 19:46:46] [Rank 0] step:4701/10000 train_time:187343ms step_avg:39.85ms +[2025-09-05 19:46:46] [Rank 0] step:4701/10000 train_time:187343ms step_avg:39.85ms +[2025-09-05 19:46:47] [Rank 0] step:4721/10000 train_time:188001ms step_avg:39.82ms +[2025-09-05 19:46:47] [Rank 0] step:4721/10000 train_time:188001ms step_avg:39.82ms +[2025-09-05 19:46:48] [Rank 0] step:4741/10000 train_time:188658ms step_avg:39.79ms +[2025-09-05 19:46:48] [Rank 0] step:4741/10000 train_time:188658ms step_avg:39.79ms +[2025-09-05 19:46:48] [Rank 0] step:4761/10000 train_time:189316ms step_avg:39.76ms +[2025-09-05 19:46:48] [Rank 0] step:4761/10000 train_time:189316ms step_avg:39.76ms +[2025-09-05 19:46:49] [Rank 0] step:4781/10000 train_time:189974ms step_avg:39.74ms +[2025-09-05 19:46:49] [Rank 0] step:4781/10000 train_time:189974ms step_avg:39.74ms +[2025-09-05 19:46:50] [Rank 0] step:4801/10000 train_time:190631ms step_avg:39.71ms +[2025-09-05 19:46:50] [Rank 0] step:4801/10000 train_time:190631ms step_avg:39.71ms +[2025-09-05 19:46:50] [Rank 0] step:4821/10000 train_time:191289ms step_avg:39.68ms +[2025-09-05 19:46:50] [Rank 0] step:4821/10000 train_time:191289ms step_avg:39.68ms +[2025-09-05 19:46:51] [Rank 0] step:4841/10000 train_time:192256ms step_avg:39.71ms +[2025-09-05 19:46:51] [Rank 0] step:4841/10000 train_time:192256ms step_avg:39.71ms +[2025-09-05 19:46:52] [Rank 0] step:4861/10000 train_time:192914ms step_avg:39.69ms +[2025-09-05 19:46:52] [Rank 0] step:4861/10000 train_time:192914ms step_avg:39.69ms +[2025-09-05 19:46:53] [Rank 0] step:4881/10000 train_time:193573ms step_avg:39.66ms +[2025-09-05 19:46:53] [Rank 0] step:4881/10000 train_time:193573ms step_avg:39.66ms +[2025-09-05 19:46:53] [Rank 0] step:4901/10000 train_time:194231ms step_avg:39.63ms +[2025-09-05 19:46:53] [Rank 0] step:4901/10000 train_time:194231ms step_avg:39.63ms +[2025-09-05 19:46:54] [Rank 0] step:4921/10000 train_time:194890ms step_avg:39.60ms +[2025-09-05 19:46:54] [Rank 0] step:4921/10000 train_time:194890ms step_avg:39.60ms +[2025-09-05 19:46:55] [Rank 0] step:4941/10000 train_time:195548ms step_avg:39.58ms +[2025-09-05 19:46:55] [Rank 0] step:4941/10000 train_time:195548ms step_avg:39.58ms +[2025-09-05 19:46:55] [Rank 0] step:4961/10000 train_time:196206ms step_avg:39.55ms +[2025-09-05 19:46:55] [Rank 0] step:4961/10000 train_time:196206ms step_avg:39.55ms +[2025-09-05 19:46:56] [Rank 0] step:4981/10000 train_time:196864ms step_avg:39.52ms +[2025-09-05 19:46:56] [Rank 0] step:4981/10000 train_time:196864ms step_avg:39.52ms +[2025-09-05 19:46:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:46:57] [Rank 0] PRINT: step:5000/10000 train_loss:0.7422 val_loss:0.7259 train_time:197756ms step_avg:39.55ms
+[2025-09-05 19:46:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:46:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:48:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:48:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:48:19] [Rank 0] Total Loss: 4.9608
+[2025-09-05 19:48:19] [Rank 0] Total FTA (Unweighted): 0.7956
+[2025-09-05 19:48:19] [Rank 0] Total FTA (Weighted): 0.7956
+[2025-09-05 19:48:19] [Rank 0] Group 0 Loss: 4.8400
+[2025-09-05 19:48:19] [Rank 0] Group 1 Loss: 4.4188
+[2025-09-05 19:48:19] [Rank 0] Group 2 Loss: 4.6102
+[2025-09-05 19:48:19] [Rank 0] Group 3 Loss: 4.7502
+[2025-09-05 19:48:19] [Rank 0] Group 4 Loss: 4.7642
+[2025-09-05 19:48:19] [Rank 0] Group 5 Loss: 4.8440
+[2025-09-05 19:48:19] [Rank 0] Group 6 Loss: 4.8106
+[2025-09-05 19:48:19] [Rank 0] Group 7 Loss: 4.9307
+[2025-09-05 19:48:19] [Rank 0] Group 8 Loss: 5.0047
+[2025-09-05 19:48:19] [Rank 0] Group 9 Loss: 5.0398
+[2025-09-05 19:48:19] [Rank 0] Group 10 Loss: 5.1493
+[2025-09-05 19:48:19] [Rank 0] Group 11 Loss: 5.1443
+[2025-09-05 19:48:19] [Rank 0] Group 12 Loss: 5.0660
+[2025-09-05 19:48:19] [Rank 0] Group 13 Loss: 5.2291
+[2025-09-05 19:48:19] [Rank 0] Group 14 Loss: 5.3272
+[2025-09-05 19:48:19] [Rank 0] Group 15 Loss: 5.4431
+[2025-09-05 19:48:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:48:19] [Rank 0] Group 9 FTA: 0.9500
+[2025-09-05 19:48:19] [Rank 0] Group 10 FTA: 0.9300
+[2025-09-05 19:48:19] [Rank 0] Group 11 FTA: 0.8400
+[2025-09-05 19:48:19] [Rank 0] Group 12 FTA: 0.5800
+[2025-09-05 19:48:19] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 19:48:19] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 19:48:19] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 19:48:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:48:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:48:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:48:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:48:20] [Rank 0] step:5001/10000 train_time:197764ms step_avg:39.54ms
+[2025-09-05 19:48:21] [Rank 0] step:5021/10000 train_time:198217ms step_avg:39.48ms
+[2025-09-05 19:48:22] [Rank 0] step:5041/10000 train_time:198873ms step_avg:39.45ms
+[2025-09-05 19:48:22] [Rank 0] step:5061/10000 train_time:199531ms step_avg:39.43ms
+[2025-09-05 19:48:23] [Rank 0] step:5081/10000 train_time:200187ms step_avg:39.40ms
+[2025-09-05 19:48:23] [Rank 0] step:5101/10000 train_time:200845ms step_avg:39.37ms
+[2025-09-05 19:48:24] [Rank 0] step:5121/10000 train_time:201503ms step_avg:39.35ms
+[2025-09-05 19:48:25] [Rank 0] step:5141/10000 train_time:202342ms step_avg:39.36ms
+[2025-09-05 19:48:26] [Rank 0] step:5161/10000 train_time:203001ms step_avg:39.33ms
+[2025-09-05 19:48:26] [Rank 0] step:5181/10000 train_time:203659ms step_avg:39.31ms
+[2025-09-05 19:48:27] [Rank 0] step:5201/10000 train_time:204548ms step_avg:39.33ms
+[2025-09-05 19:48:28] [Rank 0] step:5221/10000 train_time:205205ms step_avg:39.30ms
+[2025-09-05 19:48:28] [Rank 0] step:5241/10000 train_time:205863ms step_avg:39.28ms
+[2025-09-05 19:48:29] [Rank 0] step:5261/10000 train_time:206520ms step_avg:39.25ms
+[2025-09-05 19:48:30] [Rank 0] step:5281/10000 train_time:207179ms step_avg:39.23ms
+[2025-09-05 19:48:30] [Rank 0] step:5301/10000 train_time:207838ms step_avg:39.21ms
+[2025-09-05 19:48:31] [Rank 0] step:5321/10000 train_time:208495ms step_avg:39.18ms
+[2025-09-05 19:48:32] [Rank 0] step:5341/10000 train_time:209154ms step_avg:39.16ms
+[2025-09-05 19:48:32] [Rank 0] step:5361/10000 train_time:209813ms step_avg:39.14ms
+[2025-09-05 19:48:33] [Rank 0] step:5381/10000 train_time:210472ms step_avg:39.11ms
+[2025-09-05 19:48:34] [Rank 0] step:5401/10000 train_time:211131ms step_avg:39.09ms
+[2025-09-05 19:48:34] [Rank 0] step:5421/10000 train_time:211789ms step_avg:39.07ms
+[2025-09-05 19:48:35] [Rank 0] step:5441/10000 train_time:212447ms step_avg:39.05ms
+[2025-09-05 19:48:36] [Rank 0] step:5461/10000 train_time:213104ms step_avg:39.02ms
+[2025-09-05 19:48:36] [Rank 0] step:5481/10000 train_time:213763ms step_avg:39.00ms
+[2025-09-05 19:48:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:48:38] [Rank 0] PRINT: step:5500/10000 train_loss:0.7271 val_loss:0.7137 train_time:214655ms step_avg:39.03ms
+[2025-09-05 19:48:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:48:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:49:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:49:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:49:59] [Rank 0] Total Loss: 5.0461
+[2025-09-05 19:49:59] [Rank 0] Total FTA (Unweighted): 0.8181
+[2025-09-05 19:49:59] [Rank 0] Total FTA (Weighted): 0.8181
+[2025-09-05 19:49:59] [Rank 0] Group 0 Loss: 4.8188
+[2025-09-05 19:49:59] [Rank 0] Group 1 Loss: 4.4232
+[2025-09-05 19:49:59] [Rank 0] Group 2 Loss: 4.6043
+[2025-09-05 19:49:59] [Rank 0] Group 3 Loss: 4.9082
+[2025-09-05 19:49:59] [Rank 0] Group 4 Loss: 4.8498
+[2025-09-05 19:49:59] [Rank 0] Group 5 Loss: 4.8903
+[2025-09-05 19:49:59] [Rank 0] Group 6 Loss: 4.9011
+[2025-09-05 19:49:59] [Rank 0] Group 7 Loss: 4.9820
+[2025-09-05 19:49:59] [Rank 0] Group 8 Loss: 5.1674
+[2025-09-05 19:49:59] [Rank 0] Group 9 Loss: 5.1382
+[2025-09-05 19:49:59] [Rank 0] Group 10 Loss: 5.3103
+[2025-09-05 19:49:59] [Rank 0] Group 11 Loss: 5.2162
+[2025-09-05 19:49:59] [Rank 0] Group 12 Loss: 5.1867
+[2025-09-05 19:49:59] [Rank 0] Group 13 Loss: 5.3764
+[2025-09-05 19:49:59] [Rank 0] Group 14 Loss: 5.4305
+[2025-09-05 19:49:59] [Rank 0] Group 15 Loss: 5.5346
+[2025-09-05 19:49:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 19:49:59] [Rank 0] Group 10 FTA: 0.9500
+[2025-09-05 19:49:59] [Rank 0] Group 11 FTA: 0.8800
+[2025-09-05 19:49:59] [Rank 0] Group 12 FTA: 0.7300
+[2025-09-05 19:49:59] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-05 19:49:59] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 19:49:59] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:49:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:50:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:50:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:50:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:50:00] [Rank 0] step:5501/10000 train_time:214662ms step_avg:39.02ms
+[2025-09-05 19:50:01] [Rank 0] step:5521/10000 train_time:215099ms step_avg:38.96ms
+[2025-09-05 19:50:02] [Rank 0] step:5541/10000 train_time:215758ms step_avg:38.94ms
+[2025-09-05 19:50:02] [Rank 0] step:5561/10000 train_time:216437ms step_avg:38.92ms
+[2025-09-05 19:50:03] [Rank 0] step:5581/10000 train_time:217095ms step_avg:38.90ms
+[2025-09-05 19:50:04] [Rank 0] step:5601/10000 train_time:217756ms step_avg:38.88ms
+[2025-09-05 19:50:04] [Rank 0] step:5621/10000 train_time:218412ms step_avg:38.86ms
+[2025-09-05 19:50:05] [Rank 0] step:5641/10000 train_time:219069ms step_avg:38.84ms
+[2025-09-05 19:50:06] [Rank 0] step:5661/10000 train_time:219877ms step_avg:38.84ms
+[2025-09-05 19:50:07] [Rank 0] step:5681/10000 train_time:220536ms step_avg:38.82ms
+[2025-09-05 19:50:07] [Rank 0] step:5701/10000 train_time:221193ms step_avg:38.80ms
+[2025-09-05 19:50:08] [Rank 0] step:5721/10000 train_time:221851ms step_avg:38.78ms
+[2025-09-05 19:50:08] [Rank 0] step:5741/10000 train_time:222509ms step_avg:38.76ms
+[2025-09-05 19:50:09] [Rank 0] step:5761/10000 train_time:223167ms step_avg:38.74ms
+[2025-09-05 19:50:10] [Rank 0] step:5781/10000 train_time:223824ms step_avg:38.72ms
+[2025-09-05 19:50:10] [Rank 0] step:5801/10000 train_time:224482ms step_avg:38.70ms
+[2025-09-05 19:50:11] [Rank 0] step:5821/10000 train_time:225139ms step_avg:38.68ms
+[2025-09-05 19:50:12] [Rank 0] step:5841/10000 train_time:225796ms step_avg:38.66ms
+[2025-09-05 19:50:12] [Rank 0] step:5861/10000 train_time:226454ms step_avg:38.64ms
+[2025-09-05 19:50:13] [Rank 0] step:5881/10000 train_time:227111ms step_avg:38.62ms
+[2025-09-05 19:50:14] [Rank 0] step:5901/10000 train_time:227768ms step_avg:38.60ms
+[2025-09-05 19:50:14] [Rank 0] step:5921/10000 train_time:228426ms step_avg:38.58ms
+[2025-09-05 19:50:15] [Rank 0] step:5941/10000 train_time:229083ms step_avg:38.56ms
+[2025-09-05 19:50:16] [Rank 0] step:5961/10000 train_time:229740ms step_avg:38.54ms
+[2025-09-05 19:50:16] [Rank 0] step:5981/10000 train_time:230397ms step_avg:38.52ms
+[2025-09-05 19:50:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:50:17] [Rank 0] PRINT: step:6000/10000 train_loss:0.7154 val_loss:0.7028 train_time:231289ms step_avg:38.55ms
+[2025-09-05 19:50:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:50:18] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:51:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:51:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:51:38] [Rank 0] Total Loss: 5.0523
+[2025-09-05 19:51:38] [Rank 0] Total FTA (Unweighted): 0.8175
+[2025-09-05 19:51:38] [Rank 0] Total FTA (Weighted): 0.8175
+[2025-09-05 19:51:38] [Rank 0] Group 0 Loss: 4.9985
+[2025-09-05 19:51:38] [Rank 0] Group 1 Loss: 4.2561
+[2025-09-05 19:51:38] [Rank 0] Group 2 Loss: 4.6939
+[2025-09-05 19:51:38] [Rank 0] Group 3 Loss: 4.9907
+[2025-09-05 19:51:38] [Rank 0] Group 4 Loss: 4.8656
+[2025-09-05 19:51:38] [Rank 0] Group 5 Loss: 4.9697
+[2025-09-05 19:51:38] [Rank 0] Group 6 Loss: 4.9715
+[2025-09-05 19:51:38] [Rank 0] Group 7 Loss: 4.9718
+[2025-09-05 19:51:38] [Rank 0] Group 8 Loss: 5.1478
+[2025-09-05 19:51:38] [Rank 0] Group 9 Loss: 5.1675
+[2025-09-05 19:51:38] [Rank 0] Group 10 Loss: 5.2181
+[2025-09-05 19:51:38] [Rank 0] Group 11 Loss: 5.2006
+[2025-09-05 19:51:38] [Rank 0] Group 12 Loss: 5.1728
+[2025-09-05 19:51:38] [Rank 0] Group 13 Loss: 5.3372
+[2025-09-05 19:51:39] [Rank 0] Group 14 Loss: 5.3824
+[2025-09-05 19:51:39] [Rank 0] Group 15 Loss: 5.4920
+[2025-09-05 19:51:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 19:51:39] [Rank 0] Group 10 FTA: 0.9300
+[2025-09-05 19:51:39] [Rank 0] Group 11 FTA: 0.9200
+[2025-09-05 19:51:39] [Rank 0] Group 12 FTA: 0.7300
+[2025-09-05 19:51:39] [Rank 0] Group 13 FTA: 0.2800
+[2025-09-05 19:51:39] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 19:51:39] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 19:51:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:51:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:51:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:51:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:51:40] [Rank 0] step:6001/10000 train_time:231297ms step_avg:38.54ms
+[2025-09-05 19:51:41] [Rank 0] step:6021/10000 train_time:232199ms step_avg:38.56ms
+[2025-09-05 19:51:42] [Rank 0] step:6041/10000 train_time:232857ms step_avg:38.55ms
+[2025-09-05 19:51:42] [Rank 0] step:6061/10000 train_time:233515ms step_avg:38.53ms
+[2025-09-05 19:51:43] [Rank 0] step:6081/10000 train_time:234173ms step_avg:38.51ms
+[2025-09-05 19:51:44] [Rank 0] step:6101/10000 train_time:234830ms step_avg:38.49ms
+[2025-09-05 19:51:44] [Rank 0] step:6121/10000 train_time:235488ms step_avg:38.47ms
+[2025-09-05 19:51:45] [Rank 0] step:6141/10000 train_time:236147ms step_avg:38.45ms
+[2025-09-05 19:51:46] [Rank 0] step:6161/10000 train_time:236805ms step_avg:38.44ms
+[2025-09-05 19:51:46] [Rank 0] step:6181/10000 train_time:237463ms step_avg:38.42ms
+[2025-09-05 19:51:47] [Rank 0] step:6201/10000 train_time:238121ms step_avg:38.40ms
+[2025-09-05 19:51:48] [Rank 0] step:6221/10000 train_time:238779ms step_avg:38.38ms
+[2025-09-05 19:51:48] [Rank 0] step:6241/10000 train_time:239436ms step_avg:38.37ms
+[2025-09-05 19:51:49] [Rank 0] step:6261/10000 train_time:240095ms step_avg:38.35ms
+[2025-09-05 19:51:50] [Rank 0] step:6281/10000 train_time:240753ms step_avg:38.33ms
+[2025-09-05 19:51:50] [Rank 0] step:6301/10000 train_time:241410ms step_avg:38.31ms
+[2025-09-05 19:51:51] [Rank 0] step:6321/10000 train_time:242068ms step_avg:38.30ms
+[2025-09-05 19:51:52] [Rank 0] step:6341/10000 train_time:242726ms step_avg:38.28ms
+[2025-09-05 19:51:52] [Rank 0] step:6361/10000 train_time:243384ms step_avg:38.26ms
+[2025-09-05 19:51:53] [Rank 0] step:6381/10000 train_time:244041ms step_avg:38.24ms
+[2025-09-05 19:51:54] [Rank 0] step:6401/10000 train_time:244699ms step_avg:38.23ms
+[2025-09-05 19:51:54] [Rank 0] step:6421/10000 train_time:245357ms step_avg:38.21ms
+[2025-09-05 19:51:55] [Rank 0] step:6441/10000 train_time:246014ms step_avg:38.20ms
+[2025-09-05 19:51:56] [Rank 0] step:6461/10000 train_time:246672ms step_avg:38.18ms
+[2025-09-05 19:51:56] [Rank 0] step:6481/10000 train_time:247329ms step_avg:38.16ms
+[2025-09-05 19:51:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:51:57] [Rank 0] PRINT: step:6500/10000 train_loss:0.7054 val_loss:0.6933 train_time:248222ms step_avg:38.19ms
+[2025-09-05 19:51:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:51:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:53:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:53:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:53:19] [Rank 0] Total Loss: 5.0614
+[2025-09-05 19:53:19] [Rank 0] Total FTA (Unweighted): 0.8350
+[2025-09-05 19:53:19] [Rank 0] Total FTA (Weighted): 0.8350
+[2025-09-05 19:53:19] [Rank 0] Group 0 Loss: 4.9980
+[2025-09-05 19:53:19] [Rank 0] Group 1 Loss: 4.4185
+[2025-09-05 19:53:19] [Rank 0] Group 2 Loss: 4.7571
+[2025-09-05 19:53:19] [Rank 0] Group 3 Loss: 5.0128
+[2025-09-05 19:53:19] [Rank 0] Group 4 Loss: 4.8258
+[2025-09-05 19:53:19] [Rank 0] Group 5 Loss: 4.9674
+[2025-09-05 19:53:19] [Rank 0] Group 6 Loss: 4.9545
+[2025-09-05 19:53:19] [Rank 0] Group 7 Loss: 4.9694
+[2025-09-05 19:53:19] [Rank 0] Group 8 Loss: 5.1931
+[2025-09-05 19:53:19] [Rank 0] Group 9 Loss: 5.1540
+[2025-09-05 19:53:19] [Rank 0] Group 10 Loss: 5.2801
+[2025-09-05 19:53:19] [Rank 0] Group 11 Loss: 5.2513
+[2025-09-05 19:53:19] [Rank 0] Group 12 Loss: 5.1482
+[2025-09-05 19:53:19] [Rank 0] Group 13 Loss: 5.2859
+[2025-09-05 19:53:19] [Rank 0] Group 14 Loss: 5.3719
+[2025-09-05 19:53:19] [Rank 0] Group 15 Loss: 5.3944
+[2025-09-05 19:53:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 19:53:19] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 19:53:19] [Rank 0] Group 10 FTA: 0.9700
+[2025-09-05 19:53:19] [Rank 0] Group 11 FTA: 0.9300
+[2025-09-05 19:53:19] [Rank 0] Group 12 FTA: 0.8300
+[2025-09-05 19:53:19] [Rank 0] Group 13 FTA: 0.4100
+[2025-09-05 19:53:19] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 19:53:19] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:53:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:53:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:53:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:53:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:53:20] [Rank 0] step:6501/10000 train_time:248230ms step_avg:38.18ms
+[2025-09-05 19:53:21] [Rank 0] step:6521/10000 train_time:248665ms step_avg:38.13ms
+[2025-09-05 19:53:22] [Rank 0] step:6541/10000 train_time:249324ms step_avg:38.12ms
+[2025-09-05 19:53:22] [Rank 0] step:6561/10000 train_time:249982ms step_avg:38.10ms
+[2025-09-05 19:53:23] [Rank 0] step:6581/10000 train_time:250640ms step_avg:38.09ms
+[2025-09-05 19:53:24] [Rank 0] step:6601/10000 train_time:251299ms step_avg:38.07ms
+[2025-09-05 19:53:24] [Rank 0] step:6621/10000 train_time:251957ms step_avg:38.05ms
+[2025-09-05 19:53:25] [Rank 0] step:6641/10000 train_time:252615ms step_avg:38.04ms
+[2025-09-05 19:53:26] [Rank 0] step:6661/10000 train_time:253274ms step_avg:38.02ms
+[2025-09-05 19:53:26] [Rank 0] step:6681/10000 train_time:253934ms step_avg:38.01ms
+[2025-09-05 19:53:27] [Rank 0] step:6701/10000 train_time:254592ms step_avg:37.99ms
+[2025-09-05 19:53:28] [Rank 0] step:6721/10000 train_time:255250ms step_avg:37.98ms
+[2025-09-05 19:53:28] [Rank 0] step:6741/10000 train_time:255909ms step_avg:37.96ms
+[2025-09-05 19:53:29] [Rank 0] step:6761/10000 train_time:256568ms step_avg:37.95ms
+[2025-09-05 19:53:30] [Rank 0] step:6781/10000 train_time:257226ms step_avg:37.93ms
+[2025-09-05 19:53:30] [Rank 0] step:6801/10000 train_time:257884ms step_avg:37.92ms
+[2025-09-05 19:53:31] [Rank 0] step:6821/10000 train_time:258542ms step_avg:37.90ms
+[2025-09-05 19:53:32] [Rank 0] step:6841/10000 train_time:259402ms step_avg:37.92ms
+[2025-09-05 19:53:32] [Rank 0] step:6861/10000 train_time:260058ms step_avg:37.90ms
+[2025-09-05 19:53:33] [Rank 0] step:6881/10000 train_time:260716ms step_avg:37.89ms
+[2025-09-05 19:53:34] [Rank 0] step:6901/10000 train_time:261375ms step_avg:37.87ms
+[2025-09-05 19:53:34] [Rank 0] step:6921/10000 train_time:262033ms step_avg:37.86ms
+[2025-09-05 19:53:35] [Rank 0] step:6941/10000 train_time:262691ms step_avg:37.85ms
+[2025-09-05 19:53:36] [Rank 0] step:6961/10000 train_time:263348ms step_avg:37.83ms
+[2025-09-05 19:53:36] [Rank 0] step:6981/10000 train_time:264005ms step_avg:37.82ms
+[2025-09-05 19:53:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:53:38] [Rank 0] PRINT: step:7000/10000 train_loss:0.6968 val_loss:0.6857 train_time:264897ms step_avg:37.84ms
+[2025-09-05 19:53:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:53:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:54:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:54:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:54:59] [Rank 0] Total Loss: 5.0635
+[2025-09-05 19:54:59] [Rank 0] Total FTA (Unweighted): 0.8488
+[2025-09-05 19:54:59] [Rank 0] Total FTA (Weighted): 0.8488
+[2025-09-05 19:54:59] [Rank 0] Group 0 Loss: 4.9143
+[2025-09-05 19:54:59] [Rank 0] Group 1 Loss: 4.3895
+[2025-09-05 19:54:59] [Rank 0] Group 2 Loss: 4.7243
+[2025-09-05 19:54:59] [Rank 0] Group 3 Loss: 4.9054
+[2025-09-05 19:54:59] [Rank 0] Group 4 Loss: 4.8701
+[2025-09-05 19:54:59] [Rank 0] Group 5 Loss: 4.9606
+[2025-09-05 19:54:59] [Rank 0] Group 6 Loss: 4.9813
+[2025-09-05 19:54:59] [Rank 0] Group 7 Loss: 4.9951
+[2025-09-05 19:54:59] [Rank 0] Group 8 Loss: 5.2026
+[2025-09-05 19:54:59] [Rank 0] Group 9 Loss: 5.1767
+[2025-09-05 19:54:59] [Rank 0] Group 10 Loss: 5.3326
+[2025-09-05 19:54:59] [Rank 0] Group 11 Loss: 5.2669
+[2025-09-05 19:54:59] [Rank 0] Group 12 Loss: 5.2004
+[2025-09-05 19:54:59] [Rank 0] Group 13 Loss: 5.3206
+[2025-09-05 19:54:59] [Rank 0] Group 14 Loss: 5.3797
+[2025-09-05 19:54:59] [Rank 0] Group 15 Loss: 5.3964
+[2025-09-05 19:54:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 19:54:59] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 19:54:59] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 19:54:59] [Rank 0] Group 12 FTA: 0.8600
+[2025-09-05 19:54:59] [Rank 0] Group 13 FTA: 0.4600
+[2025-09-05 19:54:59] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 19:54:59] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 19:54:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:55:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:55:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:55:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:55:00] [Rank 0] step:7001/10000 train_time:264904ms step_avg:37.84ms
+[2025-09-05 19:55:01] [Rank 0] step:7021/10000 train_time:265357ms step_avg:37.79ms
+[2025-09-05 19:55:02] [Rank 0] step:7041/10000 train_time:266015ms step_avg:37.78ms
+[2025-09-05 19:55:02] [Rank 0] step:7061/10000 train_time:266674ms step_avg:37.77ms
+[2025-09-05 19:55:03] [Rank 0] step:7081/10000 train_time:267332ms step_avg:37.75ms
+[2025-09-05 19:55:04] [Rank 0] step:7101/10000 train_time:267990ms step_avg:37.74ms
+[2025-09-05 19:55:04] [Rank 0] step:7121/10000 train_time:268649ms step_avg:37.73ms
+[2025-09-05 19:55:05] [Rank 0] step:7141/10000 train_time:269308ms step_avg:37.71ms
+[2025-09-05 19:55:05] [Rank 0] step:7161/10000 train_time:269967ms step_avg:37.70ms
+[2025-09-05 19:55:06] [Rank 0] step:7181/10000 train_time:270624ms step_avg:37.69ms
+[2025-09-05 19:55:07] [Rank 0] step:7201/10000 train_time:271283ms step_avg:37.67ms
+[2025-09-05 19:55:07] [Rank 0] step:7221/10000 train_time:271941ms step_avg:37.66ms
+[2025-09-05 19:55:08] [Rank 0] step:7241/10000 train_time:272599ms step_avg:37.65ms
+[2025-09-05 19:55:09] [Rank 0] step:7261/10000 train_time:273257ms step_avg:37.63ms
+[2025-09-05 19:55:09] [Rank 0] step:7281/10000 train_time:273915ms step_avg:37.62ms
+[2025-09-05 19:55:10] [Rank 0] step:7301/10000 train_time:274574ms step_avg:37.61ms
+[2025-09-05 19:55:11] [Rank 0] step:7321/10000 train_time:275231ms step_avg:37.59ms
+[2025-09-05 19:55:11] [Rank 0] step:7341/10000 train_time:275889ms step_avg:37.58ms
+[2025-09-05 19:55:12] [Rank 0] step:7361/10000 train_time:276551ms step_avg:37.57ms
+[2025-09-05 19:55:13] [Rank 0] step:7381/10000 train_time:277209ms step_avg:37.56ms
+[2025-09-05 19:55:13] [Rank 0] step:7401/10000 train_time:277866ms step_avg:37.54ms
+[2025-09-05 19:55:14] [Rank 0] step:7421/10000 train_time:278523ms step_avg:37.53ms
+[2025-09-05 19:55:15] [Rank 0] step:7441/10000 train_time:279182ms step_avg:37.52ms
+[2025-09-05 19:55:15] [Rank 0] step:7461/10000 train_time:279840ms step_avg:37.51ms
+[2025-09-05 19:55:16] [Rank 0] step:7481/10000 train_time:280498ms step_avg:37.49ms
+[2025-09-05 19:55:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:55:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:55:17] [Rank 0] PRINT: step:7500/10000 train_loss:0.6891 val_loss:0.6799 train_time:281390ms step_avg:37.52ms +[2025-09-05 19:55:17] [Rank 0] PRINT: step:7500/10000 train_loss:0.6891 val_loss:0.6799 train_time:281390ms step_avg:37.52ms +[2025-09-05 19:55:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:55:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:55:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:55:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:56:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:56:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:56:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:56:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:56:38] [Rank 0] Total Loss: 5.0649 +[2025-09-05 19:56:38] [Rank 0] Total Loss: 5.0649 +[2025-09-05 19:56:38] [Rank 0] Total FTA (Unweighted): 0.8544 +[2025-09-05 19:56:38] [Rank 0] Total FTA (Unweighted): 0.8544 +[2025-09-05 19:56:38] [Rank 0] Total FTA (Weighted): 0.8544 +[2025-09-05 19:56:38] [Rank 0] Total FTA (Weighted): 0.8544 +[2025-09-05 19:56:39] [Rank 0] Group 0 Loss: 4.9046 +[2025-09-05 19:56:39] [Rank 0] Group 0 Loss: 4.9046 +[2025-09-05 19:56:39] [Rank 0] Group 1 Loss: 4.4903 +[2025-09-05 19:56:39] [Rank 0] Group 1 Loss: 4.4903 +[2025-09-05 19:56:39] [Rank 0] Group 2 Loss: 4.6346 +[2025-09-05 19:56:39] [Rank 0] Group 2 Loss: 4.6346 +[2025-09-05 19:56:39] [Rank 0] Group 3 Loss: 4.8983 +[2025-09-05 19:56:39] [Rank 0] Group 3 Loss: 4.8983 +[2025-09-05 19:56:39] [Rank 0] Group 4 Loss: 4.9292 +[2025-09-05 19:56:39] [Rank 0] Group 4 Loss: 4.9292 +[2025-09-05 19:56:39] [Rank 0] Group 5 Loss: 4.9625 +[2025-09-05 19:56:39] [Rank 0] Group 5 Loss: 4.9625 +[2025-09-05 19:56:39] [Rank 0] Group 6 Loss: 4.9383 +[2025-09-05 19:56:39] [Rank 0] Group 6 Loss: 4.9383 +[2025-09-05 19:56:39] [Rank 0] Group 7 Loss: 5.0652 +[2025-09-05 19:56:39] [Rank 0] Group 7 Loss: 5.0652 +[2025-09-05 19:56:39] [Rank 0] Group 8 Loss: 5.1960 +[2025-09-05 19:56:39] [Rank 0] Group 8 Loss: 5.1960 +[2025-09-05 19:56:39] [Rank 0] Group 9 Loss: 5.1687 +[2025-09-05 19:56:39] [Rank 0] Group 9 Loss: 5.1687 +[2025-09-05 19:56:39] [Rank 0] Group 10 Loss: 5.3095 +[2025-09-05 19:56:39] [Rank 0] Group 10 Loss: 5.3095 +[2025-09-05 19:56:39] [Rank 0] Group 11 Loss: 5.2499 +[2025-09-05 19:56:39] [Rank 0] Group 11 Loss: 5.2499 +[2025-09-05 19:56:39] [Rank 0] Group 12 Loss: 5.1842 +[2025-09-05 19:56:39] [Rank 0] Group 12 Loss: 5.1842 +[2025-09-05 19:56:39] [Rank 0] Group 13 Loss: 5.3040 +[2025-09-05 19:56:39] [Rank 0] Group 13 Loss: 5.3040 +[2025-09-05 19:56:39] [Rank 0] Group 14 Loss: 5.3869 +[2025-09-05 19:56:39] [Rank 0] Group 14 Loss: 5.3869 +[2025-09-05 19:56:39] [Rank 0] Group 15 Loss: 5.4159 +[2025-09-05 19:56:39] [Rank 0] Group 15 Loss: 5.4159 +[2025-09-05 19:56:39] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:56:39] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:56:39] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:56:39] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:56:39] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:56:39] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:56:39] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:56:39] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:56:39] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:56:39] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:56:39] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:56:39] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:56:39] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:56:39] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 19:56:39] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 19:56:39] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 19:56:39] [Rank 0] Group 12 FTA: 0.9200
+[2025-09-05 19:56:39] [Rank 0] Group 13 FTA: 0.4800
+[2025-09-05 19:56:39] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 19:56:39] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 19:56:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:56:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:56:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:56:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:56:40] [Rank 0] step:7501/10000 train_time:281399ms step_avg:37.51ms
+[2025-09-05 19:56:41] [Rank 0] step:7521/10000 train_time:281845ms step_avg:37.47ms
+[2025-09-05 19:56:41] [Rank 0] step:7541/10000 train_time:282503ms step_avg:37.46ms
+[2025-09-05 19:56:42] [Rank 0] step:7561/10000 train_time:283161ms step_avg:37.45ms
+[2025-09-05 19:56:43] [Rank 0] step:7581/10000 train_time:283820ms step_avg:37.44ms
+[2025-09-05 19:56:43] [Rank 0] step:7601/10000 train_time:284479ms step_avg:37.43ms
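A note on the two FTA totals reported in the block above: with 100 fixed-eval samples in each of the 16 groups, a sample-weighted mean and a plain mean of the per-group scores all but coincide, which is why the log shows 0.8544 for both. A sketch of that aggregation using the step-7500 values (FTA itself, presumably a per-sample first-token-accuracy score, is taken as given; the computation shown is an assumption about what "weighted" means, not code from the script):

```python
# Per-group FTA as logged at step 7500; 100 fixed-eval samples per group.
per_group_fta = [1.0] * 10 + [0.99, 0.96, 0.92, 0.48, 0.21, 0.11]  # groups 0-15
samples_per_group = [100] * 16

unweighted = sum(per_group_fta) / len(per_group_fta)  # mean of group means
weighted = (sum(f * n for f, n in zip(per_group_fta, samples_per_group))
            / sum(samples_per_group))                 # pooled over all samples

print(f"unweighted={unweighted:.4f} weighted={weighted:.4f}")
# -> unweighted=0.8544 weighted=0.8544 (identical when group sizes are equal)
```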
+[2025-09-05 19:56:44] [Rank 0] step:7621/10000 train_time:285135ms step_avg:37.41ms
+[2025-09-05 19:56:45] [Rank 0] step:7641/10000 train_time:286133ms step_avg:37.45ms
+[2025-09-05 19:56:46] [Rank 0] step:7661/10000 train_time:286602ms step_avg:37.41ms
+[2025-09-05 19:56:46] [Rank 0] step:7681/10000 train_time:287260ms step_avg:37.40ms
+[2025-09-05 19:56:47] [Rank 0] step:7701/10000 train_time:287918ms step_avg:37.39ms
+[2025-09-05 19:56:48] [Rank 0] step:7721/10000 train_time:288578ms step_avg:37.38ms
+[2025-09-05 19:56:48] [Rank 0] step:7741/10000 train_time:289235ms step_avg:37.36ms
+[2025-09-05 19:56:49] [Rank 0] step:7761/10000 train_time:289894ms step_avg:37.35ms
+[2025-09-05 19:56:50] [Rank 0] step:7781/10000 train_time:290552ms step_avg:37.34ms
+[2025-09-05 19:56:50] [Rank 0] step:7801/10000 train_time:291211ms step_avg:37.33ms
+[2025-09-05 19:56:51] [Rank 0] step:7821/10000 train_time:292033ms step_avg:37.34ms
+[2025-09-05 19:56:52] [Rank 0] step:7841/10000 train_time:292691ms step_avg:37.33ms
+[2025-09-05 19:56:52] [Rank 0] step:7861/10000 train_time:293350ms step_avg:37.32ms
+[2025-09-05 19:56:53] [Rank 0] step:7881/10000 train_time:294012ms step_avg:37.31ms
+[2025-09-05 19:56:54] [Rank 0] step:7901/10000 train_time:294902ms step_avg:37.32ms
+[2025-09-05 19:56:55] [Rank 0] step:7921/10000 train_time:295560ms step_avg:37.31ms
+[2025-09-05 19:56:55] [Rank 0] step:7941/10000 train_time:296218ms step_avg:37.30ms
+[2025-09-05 19:56:56] [Rank 0] step:7961/10000 train_time:296876ms step_avg:37.29ms
+[2025-09-05 19:56:57] [Rank 0] step:7981/10000 train_time:297534ms step_avg:37.28ms
+[2025-09-05 19:56:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:56:58] [Rank 0] PRINT: step:8000/10000 train_loss:0.6829 val_loss:0.6730 train_time:298426ms step_avg:37.30ms
+[2025-09-05 19:56:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:56:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:58:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:58:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:58:20] [Rank 0] Total Loss: 5.1159
+[2025-09-05 19:58:20] [Rank 0] Total FTA (Unweighted): 0.8669
+[2025-09-05 19:58:20] [Rank 0] Total FTA (Weighted): 0.8669
+[2025-09-05 19:58:20] [Rank 0] Group 0 Loss: 4.8476
+[2025-09-05 19:58:20] [Rank 0] Group 1 Loss: 4.4058
+[2025-09-05 19:58:20] [Rank 0] Group 2 Loss: 4.7813
+[2025-09-05 19:58:20] [Rank 0] Group 3 Loss: 5.0187
+[2025-09-05 19:58:20] [Rank 0] Group 4 Loss: 4.9674
+[2025-09-05 19:58:20] [Rank 0] Group 5 Loss: 5.0294
+[2025-09-05 19:58:20] [Rank 0] Group 6 Loss: 5.0317
+[2025-09-05 19:58:20] [Rank 0] Group 7 Loss: 5.0106
+[2025-09-05 19:58:20] [Rank 0] Group 8 Loss: 5.2284
+[2025-09-05 19:58:20] [Rank 0] Group 9 Loss: 5.2429
+[2025-09-05 19:58:20] [Rank 0] Group 10 Loss: 5.3960
+[2025-09-05 19:58:20] [Rank 0] Group 11 Loss: 5.3424
+[2025-09-05 19:58:20] [Rank 0] Group 12 Loss: 5.2655
+[2025-09-05 19:58:20] [Rank 0] Group 13 Loss: 5.3882
+[2025-09-05 19:58:20] [Rank 0] Group 14 Loss: 5.4340
+[2025-09-05 19:58:20] [Rank 0] Group 15 Loss: 5.4652
+[2025-09-05 19:58:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 19:58:20] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 19:58:20] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 19:58:20] [Rank 0] Group 12 FTA: 0.9200
+[2025-09-05 19:58:20] [Rank 0] Group 13 FTA: 0.5700
+[2025-09-05 19:58:20] [Rank 0] Group 14 FTA: 0.2500
+[2025-09-05 19:58:20] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 19:58:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 19:58:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 19:58:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 19:58:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 19:58:21] [Rank 0] step:8001/10000 train_time:298435ms step_avg:37.30ms
+[2025-09-05 19:58:22] [Rank 0] step:8021/10000 train_time:298870ms step_avg:37.26ms
+[2025-09-05 19:58:23] [Rank 0] step:8041/10000 train_time:299992ms step_avg:37.31ms
+[2025-09-05 19:58:24] [Rank 0] step:8061/10000 train_time:300649ms step_avg:37.30ms
+[2025-09-05 19:58:24] [Rank 0] step:8081/10000 train_time:301307ms step_avg:37.29ms
+[2025-09-05 19:58:25] [Rank 0] step:8101/10000 train_time:301964ms step_avg:37.27ms
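Every detailed eval ends by overwriting the same four PNGs, so each file always shows the full history up to the current step. A minimal matplotlib sketch of how such a per-class curve file could be regenerated from accumulated results (purely illustrative; the actual plotting code is not part of this log), shown here for the three groups whose scores move the most:

```python
import matplotlib.pyplot as plt

# Detailed-eval history: step -> {group id -> loss}; values taken from this log.
history = {
    7500: {13: 5.3040, 14: 5.3869, 15: 5.4159},
    8000: {13: 5.3882, 14: 5.4340, 15: 5.4652},
}

fig, ax = plt.subplots()
steps = sorted(history)
for g in sorted({g for row in history.values() for g in row}):
    ax.plot(steps, [history[s][g] for s in steps], label=f"Group {g}")
ax.set_xlabel("step")
ax.set_ylabel("detailed eval loss")
ax.legend()
fig.savefig("per_class_loss_curves.png")  # rewritten after every detailed eval
```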
+[2025-09-05 19:58:26] [Rank 0] step:8121/10000 train_time:302622ms step_avg:37.26ms
+[2025-09-05 19:58:26] [Rank 0] step:8141/10000 train_time:303281ms step_avg:37.25ms
+[2025-09-05 19:58:27] [Rank 0] step:8161/10000 train_time:304044ms step_avg:37.26ms
+[2025-09-05 19:58:28] [Rank 0] step:8181/10000 train_time:304704ms step_avg:37.25ms
+[2025-09-05 19:58:28] [Rank 0] step:8201/10000 train_time:305362ms step_avg:37.23ms
+[2025-09-05 19:58:29] [Rank 0] step:8221/10000 train_time:306124ms step_avg:37.24ms
+[2025-09-05 19:58:30] [Rank 0] step:8241/10000 train_time:306782ms step_avg:37.23ms
+[2025-09-05 19:58:31] [Rank 0] step:8261/10000 train_time:307440ms step_avg:37.22ms
+[2025-09-05 19:58:31] [Rank 0] step:8281/10000 train_time:308097ms step_avg:37.21ms
+[2025-09-05 19:58:32] [Rank 0] step:8301/10000 train_time:308755ms step_avg:37.19ms
+[2025-09-05 19:58:33] [Rank 0] step:8321/10000 train_time:309413ms step_avg:37.18ms
+[2025-09-05 19:58:33] [Rank 0] step:8341/10000 train_time:310071ms step_avg:37.17ms
+[2025-09-05 19:58:34] [Rank 0] step:8361/10000 train_time:310729ms step_avg:37.16ms
+[2025-09-05 19:58:35] [Rank 0] step:8381/10000 train_time:311388ms step_avg:37.15ms
+[2025-09-05 19:58:35] [Rank 0] step:8401/10000 train_time:312046ms step_avg:37.14ms
+[2025-09-05 19:58:36] [Rank 0] step:8421/10000 train_time:312705ms step_avg:37.13ms
+[2025-09-05 19:58:36] [Rank 0] step:8441/10000 train_time:313363ms step_avg:37.12ms
+[2025-09-05 19:58:37] [Rank 0] step:8461/10000 train_time:314020ms step_avg:37.11ms
+[2025-09-05 19:58:38] [Rank 0] step:8481/10000 train_time:314677ms step_avg:37.10ms
+[2025-09-05 19:58:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:58:39] [Rank 0] PRINT: step:8500/10000 train_loss:0.6769 val_loss:0.6675 train_time:315569ms step_avg:37.13ms
+[2025-09-05 19:58:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:58:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:00:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:00:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:00:01] [Rank 0] Total Loss: 5.1502
+[2025-09-05 20:00:01] [Rank 0] Total FTA (Unweighted): 0.8787
+[2025-09-05 20:00:01] [Rank 0] Total FTA (Weighted): 0.8788
+[2025-09-05 20:00:01] [Rank 0] Group 0 Loss: 5.1223
+[2025-09-05 20:00:01] [Rank 0] Group 1 Loss: 4.5513
+[2025-09-05 20:00:01] [Rank 0] Group 2 Loss: 4.8126
+[2025-09-05 20:00:01] [Rank 0] Group 3 Loss: 4.9967
+[2025-09-05 20:00:01] [Rank 0] Group 4 Loss: 4.9875
+[2025-09-05 20:00:01] [Rank 0] Group 5 Loss: 5.0153
+[2025-09-05 20:00:01] [Rank 0] Group 6 Loss: 5.0357
+[2025-09-05 20:00:01] [Rank 0] Group 7 Loss: 5.0938
+[2025-09-05 20:00:01] [Rank 0] Group 8 Loss: 5.2647
+[2025-09-05 20:00:01] [Rank 0] Group 9 Loss: 5.2600
+[2025-09-05 20:00:01] [Rank 0] Group 10 Loss: 5.3654
+[2025-09-05 20:00:01] [Rank 0] Group 11 Loss: 5.3379
+[2025-09-05 20:00:01] [Rank 0] Group 12 Loss: 5.2880
+[2025-09-05 20:00:01] [Rank 0] Group 13 Loss: 5.3846
+[2025-09-05 20:00:01] [Rank 0] Group 14 Loss: 5.4406
+[2025-09-05 20:00:01] [Rank 0] Group 15 Loss: 5.4466
+[2025-09-05 20:00:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 20:00:01] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 20:00:01] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 20:00:01] [Rank 0] Group 12 FTA: 0.9500
+[2025-09-05 20:00:01] [Rank 0] Group 13 FTA: 0.7400
+[2025-09-05 20:00:01] [Rank 0] Group 14 FTA: 0.2800
+[2025-09-05 20:00:01] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 20:00:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 20:00:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 20:00:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 20:00:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 20:00:04] [Rank 0] step:8501/10000 train_time:315579ms step_avg:37.12ms
+[2025-09-05 20:00:04] [Rank 0] step:8521/10000 train_time:316016ms step_avg:37.09ms
+[2025-09-05 20:00:05] [Rank 0] step:8541/10000 train_time:316672ms step_avg:37.08ms
+[2025-09-05 20:00:06] [Rank 0] step:8561/10000 train_time:317330ms step_avg:37.07ms
+[2025-09-05 20:00:06] [Rank 0] step:8581/10000 train_time:317988ms step_avg:37.06ms
+[2025-09-05 20:00:07] [Rank 0] step:8601/10000 train_time:318647ms step_avg:37.05ms
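The step_avg field in these lines is simply cumulative training wall-clock divided by the step index, which can be checked directly against the log:

```python
# step_avg = train_time / step, reproduced for two logged lines.
for step, train_time_ms in [(8500, 315569), (8701, 321937)]:
    print(f"step {step}: {train_time_ms / step:.2f} ms/step")
# -> step 8500: 37.13 ms/step   (log: step_avg:37.13ms)
# -> step 8701: 37.00 ms/step   (log: step_avg:37.00ms)
```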
+[2025-09-05 20:00:08] [Rank 0] step:8621/10000 train_time:319305ms step_avg:37.04ms
+[2025-09-05 20:00:08] [Rank 0] step:8641/10000 train_time:319963ms step_avg:37.03ms
+[2025-09-05 20:00:09] [Rank 0] step:8661/10000 train_time:320621ms step_avg:37.02ms
+[2025-09-05 20:00:10] [Rank 0] step:8681/10000 train_time:321279ms step_avg:37.01ms
+[2025-09-05 20:00:10] [Rank 0] step:8701/10000 train_time:321937ms step_avg:37.00ms
+[2025-09-05 20:00:11] [Rank 0] step:8721/10000 train_time:322593ms step_avg:36.99ms
+[2025-09-05 20:00:12] [Rank 0] step:8741/10000 train_time:323251ms step_avg:36.98ms
+[2025-09-05 20:00:12] [Rank 0] step:8761/10000 train_time:323909ms step_avg:36.97ms
+[2025-09-05 20:00:13] [Rank 0] step:8781/10000 train_time:324566ms step_avg:36.96ms
+[2025-09-05 20:00:14] [Rank 0] step:8801/10000 train_time:325225ms step_avg:36.95ms
+[2025-09-05 20:00:14] [Rank 0] step:8821/10000 train_time:325883ms step_avg:36.94ms
+[2025-09-05 20:00:15] [Rank 0] step:8841/10000 train_time:326638ms step_avg:36.95ms
+[2025-09-05 20:00:16] [Rank 0] step:8861/10000 train_time:327296ms step_avg:36.94ms
+[2025-09-05 20:00:16] [Rank 0] step:8881/10000 train_time:327954ms step_avg:36.93ms
+[2025-09-05 20:00:17] [Rank 0] step:8901/10000 train_time:328613ms step_avg:36.92ms
+[2025-09-05 20:00:18] [Rank 0] step:8921/10000 train_time:329270ms step_avg:36.91ms
+[2025-09-05 20:00:18] [Rank 0] step:8941/10000 train_time:329929ms step_avg:36.90ms
+[2025-09-05 20:00:19] [Rank 0] step:8961/10000 train_time:330587ms step_avg:36.89ms
+[2025-09-05 20:00:20] [Rank 0] step:8981/10000 train_time:331245ms step_avg:36.88ms
+[2025-09-05 20:00:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:00:21] [Rank 0] PRINT: step:9000/10000 train_loss:0.6714 val_loss:0.6624 train_time:332136ms step_avg:36.90ms
+[2025-09-05 20:00:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:00:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:01:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:01:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:01:42] [Rank 0] Total Loss: 5.2163
+[2025-09-05 20:01:42] [Rank 0] Total FTA (Unweighted): 0.8862
+[2025-09-05 20:01:42] [Rank 0] Total FTA (Weighted): 0.8862
+[2025-09-05 20:01:42] [Rank 0] Group 0 Loss: 5.0903
+[2025-09-05 20:01:42] [Rank 0] Group 1 Loss: 4.5908
+[2025-09-05 20:01:42] [Rank 0] Group 2 Loss: 4.8515
+[2025-09-05 20:01:42] [Rank 0] Group 3 Loss: 5.0582
+[2025-09-05 20:01:42] [Rank 0] Group 4 Loss: 5.0539
+[2025-09-05 20:01:42] [Rank 0] Group 5 Loss: 5.0841
+[2025-09-05 20:01:42] [Rank 0] Group 6 Loss: 5.1228
+[2025-09-05 20:01:42] [Rank 0] Group 7 Loss: 5.1387
+[2025-09-05 20:01:42] [Rank 0] Group 8 Loss: 5.3146
+[2025-09-05 20:01:42] [Rank 0] Group 9 Loss: 5.3491
+[2025-09-05 20:01:42] [Rank 0] Group 10 Loss: 5.5090
+[2025-09-05 20:01:42] [Rank 0] Group 11 Loss: 5.4439
+[2025-09-05 20:01:42] [Rank 0] Group 12 Loss: 5.3688
+[2025-09-05 20:01:42] [Rank 0] Group 13 Loss: 5.4541
+[2025-09-05 20:01:42] [Rank 0] Group 14 Loss: 5.5206
+[2025-09-05 20:01:42] [Rank 0] Group 15 Loss: 5.5100
+[2025-09-05 20:01:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:01:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:01:42] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:01:42] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 20:01:43] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 20:01:43] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 20:01:43] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 20:01:43] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 20:01:43] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 20:01:43] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 20:01:43] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 20:01:43] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 20:01:43] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-05 20:01:43] [Rank 0] Group 13 FTA: 0.7300
+[2025-09-05 20:01:43] [Rank 0] Group 14 FTA: 0.3100
+[2025-09-05 20:01:43] [Rank 0] Group 15 FTA: 0.1900
+[2025-09-05 20:01:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 20:01:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 20:01:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 20:01:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 20:01:44] [Rank 0] step:9001/10000 train_time:332146ms step_avg:36.90ms
+[2025-09-05 20:01:45] [Rank 0] step:9021/10000 train_time:332596ms step_avg:36.87ms
+[2025-09-05 20:01:45] [Rank 0] step:9041/10000 train_time:333255ms step_avg:36.86ms
+[2025-09-05 20:01:46] [Rank 0] step:9061/10000 train_time:333913ms step_avg:36.85ms
+[2025-09-05 20:01:47] [Rank 0] step:9081/10000 train_time:334571ms step_avg:36.84ms
+[2025-09-05 20:01:47] [Rank 0] step:9101/10000 train_time:335229ms step_avg:36.83ms
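The configs in this diff set num_iterations to 10000 and cooldown_frac to 0.8. A sketch of a matching learning-rate shape, assuming the common constant-then-linear-cooldown schedule in which cooldown_frac is the final fraction of training spent decaying to zero (an assumption; the schedule code itself does not appear in this log):

```python
def lr_scale(step: int, num_iterations: int = 10000,
             cooldown_frac: float = 0.8) -> float:
    """Constant LR, then linear decay to 0 over the last cooldown_frac of training."""
    frac_done = step / num_iterations
    if frac_done < 1 - cooldown_frac:
        return 1.0
    return (1.0 - frac_done) / cooldown_frac  # 1.0 at the kink, 0.0 at the end

# With adam_lr=0.002 from the config below, the effective LR at step 9000 would be:
print(0.002 * lr_scale(9000))  # -> 0.00025
```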
+[2025-09-05 20:01:48] [Rank 0] step:9121/10000 train_time:335888ms step_avg:36.83ms
+[2025-09-05 20:01:49] [Rank 0] step:9141/10000 train_time:336549ms step_avg:36.82ms
+[2025-09-05 20:01:49] [Rank 0] step:9161/10000 train_time:337208ms step_avg:36.81ms
+[2025-09-05 20:01:50] [Rank 0] step:9181/10000 train_time:337867ms step_avg:36.80ms
+[2025-09-05 20:01:51] [Rank 0] step:9201/10000 train_time:338525ms step_avg:36.79ms
+[2025-09-05 20:01:51] [Rank 0] step:9221/10000 train_time:339183ms step_avg:36.78ms
+[2025-09-05 20:01:52] [Rank 0] step:9241/10000 train_time:339841ms step_avg:36.78ms
+[2025-09-05 20:01:53] [Rank 0] step:9261/10000 train_time:340500ms step_avg:36.77ms
+[2025-09-05 20:01:53] [Rank 0] step:9281/10000 train_time:341159ms step_avg:36.76ms
+[2025-09-05 20:01:54] [Rank 0] step:9301/10000 train_time:341818ms step_avg:36.75ms
+[2025-09-05 20:01:55] [Rank 0] step:9321/10000 train_time:342475ms step_avg:36.74ms
+[2025-09-05 20:01:55] [Rank 0] step:9341/10000 train_time:343133ms step_avg:36.73ms
+[2025-09-05 20:01:56] [Rank 0] step:9361/10000 train_time:343791ms step_avg:36.73ms
+[2025-09-05 20:01:57] [Rank 0] step:9381/10000 train_time:344450ms step_avg:36.72ms
+[2025-09-05 20:01:57] [Rank 0] step:9401/10000 train_time:345109ms step_avg:36.71ms
+[2025-09-05 20:01:58] [Rank 0] step:9421/10000 train_time:345767ms step_avg:36.70ms
+[2025-09-05 20:01:59] [Rank 0] step:9441/10000 train_time:346425ms step_avg:36.69ms
+[2025-09-05 20:01:59] [Rank 0] step:9461/10000 train_time:347084ms step_avg:36.69ms
+[2025-09-05 20:02:00] [Rank 0] step:9481/10000 train_time:347742ms step_avg:36.68ms
+[2025-09-05 20:02:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:02:01] [Rank 0] PRINT: step:9500/10000 train_loss:0.6661 val_loss:0.6580 train_time:348635ms step_avg:36.70ms
+[2025-09-05 20:02:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:02:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:03:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:03:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:03:23] [Rank 0] Total Loss: 5.2213
+[2025-09-05 20:03:23] [Rank 0] Total FTA (Unweighted): 0.8913
+[2025-09-05 20:03:23] [Rank 0] Total FTA (Weighted): 0.8912
+[2025-09-05 20:03:23] [Rank 0] Group 0 Loss: 5.0755
+[2025-09-05 20:03:23] [Rank 0] Group 1 Loss: 4.7584
+[2025-09-05 20:03:23] [Rank 0] Group 2 Loss: 4.9115
+[2025-09-05 20:03:23] [Rank 0] Group 3 Loss: 5.0586
+[2025-09-05 20:03:23] [Rank 0] Group 4 Loss: 5.0021
+[2025-09-05 20:03:23] [Rank 0] Group 5 Loss: 5.1229
+[2025-09-05 20:03:23] [Rank 0] Group 6 Loss: 5.0976
+[2025-09-05 20:03:23] [Rank 0] Group 7 Loss: 5.1614
+[2025-09-05 20:03:23] [Rank 0] Group 8 Loss: 5.3277
+[2025-09-05 20:03:23] [Rank 0] Group 9 Loss: 5.3223
+[2025-09-05 20:03:23] [Rank 0] Group 10 Loss: 5.4669
+[2025-09-05 20:03:23] [Rank 0] Group 11 Loss: 5.4269
+[2025-09-05 20:03:23] [Rank 0] Group 12 Loss: 5.3599
+[2025-09-05 20:03:23] [Rank 0] Group 13 Loss: 5.4234
+[2025-09-05 20:03:23] [Rank 0] Group 14 Loss: 5.4987
+[2025-09-05 20:03:23] [Rank 0] Group 15 Loss: 5.5263
+[2025-09-05 20:03:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 20:03:23] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 20:03:23] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-05 20:03:23] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-05 20:03:23] [Rank 0] Group 13 FTA: 0.7900
+[2025-09-05 20:03:23] [Rank 0] Group 14 FTA: 0.3300
+[2025-09-05 20:03:23] [Rank 0] Group 15 FTA: 0.1700
+[2025-09-05 20:03:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 20:03:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 20:03:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 20:03:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 20:03:24] [Rank 0] step:9501/10000 train_time:348645ms step_avg:36.70ms
+[2025-09-05 20:03:25] [Rank 0] step:9521/10000 train_time:349081ms step_avg:36.66ms
+[2025-09-05 20:03:26] [Rank 0] step:9541/10000 train_time:349739ms step_avg:36.66ms
+[2025-09-05 20:03:26] [Rank 0] step:9561/10000 train_time:350397ms step_avg:36.65ms
+[2025-09-05 20:03:27] [Rank 0] step:9581/10000 train_time:351055ms step_avg:36.64ms
+[2025-09-05 20:03:28] [Rank 0] step:9601/10000 train_time:351712ms step_avg:36.63ms
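"Fixed-eval set loaded with 1600 samples" matches the fixed_eval_indices.json files recorded in this diff: 16 groups, each listing per_group_k=100 dataset indices. A sketch of reading one of them (the loader is an assumption for illustration; only the file format comes from this diff):

```python
import json

# Keys are group ids ("0" .. "15"); values are lists of dataset indices.
path = ("logs_qa_adam_gated/lr_search_long/"
        "mode_5_param_gated_lr_0.002_seed_42/fixed_eval_indices.json")
with open(path) as f:
    fixed_eval = json.load(f)

n_groups = len(fixed_eval)
n_samples = sum(len(idxs) for idxs in fixed_eval.values())
print(n_groups, n_samples)  # -> 16 1600
```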
+[2025-09-05 20:03:28] [Rank 0] step:9621/10000 train_time:352371ms step_avg:36.63ms
+[2025-09-05 20:03:29] [Rank 0] step:9641/10000 train_time:353028ms step_avg:36.62ms
+[2025-09-05 20:03:30] [Rank 0] step:9661/10000 train_time:353965ms step_avg:36.64ms
+[2025-09-05 20:03:30] [Rank 0] step:9681/10000 train_time:354623ms step_avg:36.63ms
+[2025-09-05 20:03:31] [Rank 0] step:9701/10000 train_time:355280ms step_avg:36.62ms
+[2025-09-05 20:03:32] [Rank 0] step:9721/10000 train_time:355938ms step_avg:36.62ms
+[2025-09-05 20:03:32] [Rank 0] step:9741/10000 train_time:356595ms step_avg:36.61ms
+[2025-09-05 20:03:33] [Rank 0] step:9761/10000 train_time:357252ms step_avg:36.60ms
+[2025-09-05 20:03:34] [Rank 0] step:9781/10000 train_time:357910ms step_avg:36.59ms
+[2025-09-05 20:03:34] [Rank 0] step:9801/10000 train_time:358567ms step_avg:36.58ms
+[2025-09-05 20:03:35] [Rank 0] step:9821/10000 train_time:359225ms step_avg:36.58ms
+[2025-09-05 20:03:36] [Rank 0] step:9841/10000 train_time:359883ms step_avg:36.57ms
+[2025-09-05 20:03:36] [Rank 0] step:9861/10000 train_time:360542ms step_avg:36.56ms
+[2025-09-05 20:03:37] [Rank 0] step:9881/10000 train_time:361198ms step_avg:36.55ms
+[2025-09-05 20:03:38] [Rank 0] step:9901/10000 train_time:361855ms step_avg:36.55ms
+[2025-09-05 20:03:38] [Rank 0] step:9921/10000 train_time:362513ms step_avg:36.54ms
+[2025-09-05 20:03:39] [Rank 0] step:9941/10000 train_time:363170ms step_avg:36.53ms
+[2025-09-05 20:03:40] [Rank 0] step:9961/10000 train_time:363827ms step_avg:36.53ms
+[2025-09-05 20:03:40] [Rank 0] step:9981/10000 train_time:364484ms step_avg:36.52ms
+[2025-09-05 20:03:41] [Rank 0] step:10000/10000 train_time:365108ms step_avg:36.51ms
+[2025-09-05 20:03:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:03:41] [Rank 0] PRINT: step:10000/10000 train_loss:0.6617 val_loss:0.6542 train_time:365381ms step_avg:36.54ms
+[2025-09-05 20:03:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:03:42] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:05:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:05:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:05:03] [Rank 0] Total Loss: 5.2643
+[2025-09-05 20:05:03] [Rank 0] Total FTA (Unweighted): 0.9006
+[2025-09-05 20:05:03] [Rank 0] Total FTA (Weighted): 0.9006
+[2025-09-05 20:05:03] [Rank 0] Group 0 Loss: 5.0614
+[2025-09-05 20:05:03] [Rank 0] Group 1 Loss: 4.8025
+[2025-09-05 20:05:03] [Rank 0] Group 2 Loss: 4.8438
+[2025-09-05 20:05:03] [Rank 0] Group 3 Loss: 5.1063
+[2025-09-05 20:05:03] [Rank 0] Group 4 Loss: 5.0725
+[2025-09-05 20:05:03] [Rank 0] Group 5 Loss: 5.1559
+[2025-09-05 20:05:03] [Rank 0] Group 6 Loss: 5.1770
+[2025-09-05 20:05:03] [Rank 0] Group 7 Loss: 5.2011
+[2025-09-05 20:05:03] [Rank 0] Group 8 Loss: 5.3682
+[2025-09-05 20:05:03] [Rank 0] Group 9 Loss: 5.3774
+[2025-09-05 20:05:03] [Rank 0] Group 10 Loss: 5.5195
+[2025-09-05 20:05:03] [Rank 0] Group 11 Loss: 5.4751
+[2025-09-05 20:05:03] [Rank 0] Group 12 Loss: 5.4018
+[2025-09-05 20:05:03] [Rank 0] Group 13 Loss: 5.5127
+[2025-09-05 20:05:03] [Rank 0] Group 14 Loss: 5.5683
+[2025-09-05 20:05:03] [Rank 0] Group 15 Loss: 5.5855
+[2025-09-05 20:05:03] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 20:05:03] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 20:05:04] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 20:05:04] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 20:05:04] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-05 20:05:04] [Rank 0] Group 13 FTA: 0.8400
+[2025-09-05 20:05:04] [Rank 0] Group 14 FTA: 0.4000
+[2025-09-05 20:05:04] [Rank 0] Group 15 FTA: 0.2000
+[2025-09-05 20:05:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-09-05 20:05:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-09-05 20:05:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_loss_curve.png
+[2025-09-05 20:05:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.001_seed_43/total_acc_curve.png
+[2025-09-05 20:05:05] [Rank 0] step:10001/10000 train_time:365390ms step_avg:36.54ms
+[2025-09-05 20:05:05] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 20:05:05 2025 ---
+[2025-09-05 20:05:05] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..dd2b641253740d212f32522d19a4ed369e107605 --- /dev/null +++ 
b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.002, + "base_dir": "logs_qa_adam_gated/lr_search_long", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "eb6d15d4-10c1-421a-ae11-900a4962574d", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 
121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 
1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 
850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 
756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..f83f14db23373c0dc3cb2e5b69c0c8f8caae4d88 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af003ef85a93559bf39b792f2ca9702e91cbb7eddb7a7810e9f4f4fdcd1f72df +size 392282 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..654ff7281fd29c7d3a5adceb58f79b9b9304c3b1 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1754396fb4f5188f5db5546da36954ca81d52f26a87a60dcfbb38683652c7a3c +size 446617 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..af383635f9dd417cd0ea59af5ac195d7a262b5a1 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:610a85cf54c737d7e2e61928a07f3803e61a86966f450f8e20f41222bb1a327e +size 98388 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..2b281f958d6640bf05cd821b1b1b39d394203ee2 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6195863dd348511272c843df9360e8fea3c0df6f18195b993d13a7fcdd65dac2 +size 111319 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/training_log_eb6d15d4-10c1-421a-ae11-900a4962574d.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/training_log_eb6d15d4-10c1-421a-ae11-900a4962574d.txt new file mode 100644 index 0000000000000000000000000000000000000000..e2c3b892d630e6b2541e84882d0396e2c92c451b --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/training_log_eb6d15d4-10c1-421a-ae11-900a4962574d.txt @@ -0,0 +1,5614 @@ +[2025-09-05 15:56:34] 
[Rank 0] PRINT: --- Script Start: Fri Sep 5 15:56:34 2025 ---
+[2025-09-05 15:56:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.002, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 15:56:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 15:56:34] [Rank 0] PRINT: Using fixed seed: 42
+[2025-09-05 15:56:34] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42
+[2025-09-05 15:56:34] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
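+    # Shard format, as validated by the asserts below: a 256-word int32 header
+    # (magic number, format version, token count), then header[2] uint16 token ids.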
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # shards are cycled, so training simply wraps around for multi-epoch runs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
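+                         "9: SGD+momentum on all parameters (uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QK Attn, V Attn); "
+                         "13: Muon(W_O Attn, W_2 MLP)/Adam(QK Attn, V Attn, W_1 MLP); "
+                         "14: Muon(W_O Attn)/Adam(QK Attn, V Attn, MLP); "
+                         "15: Muon(W_V Attn)/Adam(QK Attn, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."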
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the run's log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
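+    # (Modes 11 and 12 are not defined; any mode without a branch in this chain
+    # falls through to the ValueError below.)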
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam regularization is desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
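+    # (Modes 11 and 12 are not defined; any mode without a branch in this chain
+    # falls through to the ValueError below.)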
+    elif current_optimizer_mode == 14:  # Muon on W_O; Adam on QKV Attn, MLP
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:  # Muon on W_V; Adam on QK/O Attn, MLP
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:  # Muon on QKV Attn; Adam on O Attn, MLP
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                # Use exp_args.muon_lr directly: unlike the "qkvo" branch, this branch
+                # defines no local muon_lr, so the bare name was a latent NameError
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
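+        # Optional sanity check (illustrative sketch, left commented out so behavior
+        # is unchanged): every trainable parameter should land in exactly one
+        # optimizer group across Adam and Muon. Uncomment when adding a new mode.
+        # _assigned = [id(p) for opt in optimizers for g in opt.param_groups for p in g["params"]]
+        # assert len(_assigned) == len(set(_assigned)), "a parameter is in two optimizer groups"
+        # assert set(_assigned) == {id(p) for p in model.parameters()}, "some parameters were never assigned"
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)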
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    # Shape: multiplier 1.0 for the first (1 - cooldown_frac) of training, then linear
+    # decay to 0.1. E.g. with num_iterations=10000, cooldown_frac=0.8: steps 0-2000
+    # give 1.0, step 6000 gives w=0.5 -> 0.55, step 10000 gives 0.1.
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128; the window grows linearly from 128 to 1728 tokens
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
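+    # No DistributedDataParallel wrapper is used; gradients are averaged manually
+    # across ranks with all_reduce, here and again in the main training loop below.
+    for param in 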
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 15:56:34] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True)  # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank)  # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
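+        # Every message is appended once to the run's logfile; the "PRINT:" prefix
+        # only controls console echoing, e.g. print0("PRINT: x") reaches stdout even
+        # with console=False, while plain messages land in the logfile only.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message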
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # SGD + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    # (modes 11 and 12 are not defined in this script)
+    elif current_optimizer_mode == 13:  # Muon on W_O, W_2 MLP; Adam on QKV Attn, W_1 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:  # Muon on W_O; Adam on QKV Attn, MLP
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:  # Muon on W_V; Adam on QK/O Attn, MLP
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:  # Muon on QKV Attn; Adam on O Attn, MLP
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
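+        # Note: torch.optim.Adam(fused=True) requires the parameters to be CUDA
+        # tensors; betas=(0.8, 0.95) and eps=1e-10 appear to follow the
+        # modded-nanogpt speedrun settings rather than the PyTorch defaults.
+
+    print0(f"PRINT: Optimizers configured. 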
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach() / args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group()
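For reference, a minimal standalone sketch (function names illustrative, constants copied from the script above) of the two per-step schedules the training loop applies: the stable-then-decay learning-rate multiplier from get_lr, and the Muon momentum warmup from 0.85 to 0.95 over the first 300 steps.

def lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    # Clamp progress to [0, 1] so the final step (step == num_iterations) is valid.
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0  # stable phase: full learning rate
    w = (1 - x) / max(cooldown_frac, 1e-9)  # linear cooldown weight in [0, 1]
    return w * 1.0 + (1 - w) * 0.1          # decay from 1.0x down to 0.1x

def muon_momentum(step: int, warmup_steps: int = 300) -> float:
    frac = min(step / warmup_steps, 1.0)
    return (1 - frac) * 0.85 + frac * 0.95  # ramp 0.85 -> 0.95 over the warmup

# With num_iterations=10000 and cooldown_frac=0.8 the multiplier stays at 1.0
# through step 2000, then decays linearly to 0.1 at step 10000.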
+[2025-09-05 15:56:34] [Rank 0] PRINT: Constructing model...
+[2025-09-05 15:56:37] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 15:56:37] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 15:56:37] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 15:56:41] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 15:56:41] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 15:56:41] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 15:56:41] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 15:56:41] [Rank 0] PRINT: Model returns:
+[2025-09-05 15:56:41] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 15:56:41] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 15:56:41] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002).
+[2025-09-05 15:56:41] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 15:56:41] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 15:56:41] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 15:56:46] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 15:56:46] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 15:57:28] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 15:57:28] [Rank 0] PRINT: Starting training...
+[2025-09-05 15:57:35] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/fixed_eval_indices.json
+[2025-09-05 15:57:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:57:40] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 15:58:14] [Rank 0] step:21/10000 train_time:33436ms step_avg:1592.18ms
+[2025-09-05 15:58:15] [Rank 0] step:41/10000 train_time:34082ms step_avg:831.27ms
+[2025-09-05 15:58:15] [Rank 0] step:61/10000 train_time:34729ms step_avg:569.33ms
+[2025-09-05 15:58:16] [Rank 0] step:81/10000 train_time:35377ms step_avg:436.75ms
+[2025-09-05 15:58:17] [Rank 0] step:101/10000 train_time:36025ms step_avg:356.68ms
+[2025-09-05 15:58:17] [Rank 0] step:121/10000 train_time:36672ms step_avg:303.08ms
+[2025-09-05 15:58:18] [Rank 0] step:141/10000 train_time:37319ms step_avg:264.67ms
+[2025-09-05 15:58:19] [Rank 0] step:161/10000 train_time:37966ms step_avg:235.81ms
+[2025-09-05 15:58:19] [Rank 0] step:181/10000 train_time:38613ms step_avg:213.33ms
+[2025-09-05 15:58:20] [Rank 0] step:201/10000 train_time:39260ms step_avg:195.33ms
+[2025-09-05 15:58:20] [Rank 0] step:221/10000 train_time:39908ms step_avg:180.58ms
+[2025-09-05 15:58:21] [Rank 0] step:241/10000 train_time:40555ms step_avg:168.28ms
+[2025-09-05 15:58:22] [Rank 0] step:261/10000 train_time:41202ms step_avg:157.86ms
+[2025-09-05 15:58:22] [Rank 0] step:281/10000 train_time:41849ms step_avg:148.93ms
+[2025-09-05 15:58:23] [Rank 0] step:301/10000 train_time:42496ms step_avg:141.18ms
+[2025-09-05 15:58:24] [Rank 0] step:321/10000 train_time:43143ms step_avg:134.40ms
+[2025-09-05 15:58:24] [Rank 0] step:341/10000 train_time:43790ms step_avg:128.42ms
+[2025-09-05 15:58:25] [Rank 0] step:361/10000 train_time:44441ms step_avg:123.11ms
+[2025-09-05 15:58:26] [Rank 0] step:381/10000 train_time:45089ms step_avg:118.34ms
+[2025-09-05 15:58:26] [Rank 0] step:401/10000 train_time:45736ms step_avg:114.06ms
+[2025-09-05 15:58:27] [Rank 0] step:421/10000 train_time:46383ms step_avg:110.17ms
+[2025-09-05 15:58:28] [Rank 0] step:441/10000 train_time:47031ms step_avg:106.65ms
+[2025-09-05 15:58:28] [Rank 0] step:461/10000 train_time:47678ms step_avg:103.42ms
+[2025-09-05 15:58:29] [Rank 0] step:481/10000 train_time:48327ms step_avg:100.47ms
+[2025-09-05 15:58:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:58:30] [Rank 0] PRINT: step:500/10000 train_loss:3.7661 val_loss:1.5463 train_time:49209ms step_avg:98.42ms
+[2025-09-05 15:58:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:58:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:59:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:59:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:59:53] [Rank 0] Total Loss: 4.0606
+[2025-09-05 15:59:53] [Rank 0] Total FTA (Unweighted): 0.3288
+[2025-09-05 15:59:53] [Rank 0] Total FTA (Weighted): 0.3287
+[2025-09-05 15:59:53] [Rank 0] Group 0 Loss: 3.2603
+[2025-09-05 15:59:53] [Rank 0] Group 1 Loss: 3.1310
+[2025-09-05 15:59:53] [Rank 0] Group 2 Loss: 3.0503
+[2025-09-05 15:59:53] [Rank 0] Group 3 Loss: 3.3535
+[2025-09-05 15:59:53] [Rank 0] Group 4 Loss: 3.5718
+[2025-09-05 15:59:53] [Rank 0] Group 5 Loss: 3.7826
+[2025-09-05 15:59:53] [Rank 0] Group 6 Loss: 3.9466
+[2025-09-05 15:59:53] [Rank 0] Group 7 Loss: 4.1082
+[2025-09-05 15:59:53] [Rank 0] Group 8 Loss: 4.3641
+[2025-09-05 15:59:53] [Rank 0] Group 9 Loss: 4.4595
+[2025-09-05 15:59:53] [Rank 0] Group 10 Loss: 4.5819
+[2025-09-05 15:59:53] [Rank 0] Group 11 Loss: 4.6642
+[2025-09-05 15:59:53] [Rank 0] Group 12 Loss: 4.6499
+[2025-09-05 15:59:53] [Rank 0] Group 13 Loss: 4.6886
+[2025-09-05 15:59:53] [Rank 0] Group 14 Loss: 4.6931
+[2025-09-05 15:59:53] [Rank 0] Group 15 Loss: 4.6648
+[2025-09-05 15:59:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:59:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:59:53] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:59:53] [Rank 0] Group 3 FTA: 0.5300
+[2025-09-05 15:59:53] [Rank 0] Group 4 FTA: 0.3700
+[2025-09-05 15:59:53] [Rank 0] Group 5 FTA: 0.2500
+[2025-09-05 15:59:53] [Rank 0] Group 6 FTA: 0.2400
+[2025-09-05 15:59:53] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-05 15:59:53] [Rank 0] Group 8 FTA: 0.1600
+[2025-09-05 15:59:53] [Rank 0] Group 9 FTA: 0.0800
+[2025-09-05 15:59:53] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 15:59:53] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 15:59:53] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 15:59:53] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 15:59:53] [Rank 0] Group 14 FTA: 0.0800
+[2025-09-05 15:59:53] [Rank 0] Group 15 FTA: 0.0400
+[2025-09-05 15:59:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 15:59:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 15:59:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 15:59:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 15:59:54] [Rank 0] step:501/10000 train_time:49217ms step_avg:98.24ms
+[2025-09-05 15:59:55] [Rank 0] step:521/10000 train_time:49644ms step_avg:95.29ms
+[2025-09-05 15:59:56] [Rank 0] step:541/10000 train_time:50291ms step_avg:92.96ms
+[2025-09-05 15:59:56] [Rank 0] step:561/10000 train_time:50937ms step_avg:90.80ms
+[2025-09-05 15:59:57] [Rank 0] step:581/10000 train_time:51584ms step_avg:88.78ms
+[2025-09-05 15:59:58] [Rank 0] step:601/10000 train_time:52231ms step_avg:86.91ms
+[2025-09-05 15:59:58] [Rank 0] step:621/10000 train_time:52878ms step_avg:85.15ms
+[2025-09-05 15:59:59] [Rank 0] step:641/10000 train_time:53524ms step_avg:83.50ms
+[2025-09-05 15:59:59] [Rank 0] step:661/10000 train_time:54171ms step_avg:81.95ms
+[2025-09-05 16:00:00] [Rank 0] step:681/10000 train_time:54818ms step_avg:80.50ms
+[2025-09-05 16:00:01] [Rank 0] step:701/10000 train_time:55464ms step_avg:79.12ms
+[2025-09-05 16:00:01] [Rank 0] step:721/10000 train_time:56111ms step_avg:77.82ms
+[2025-09-05 16:00:02] [Rank 0] step:741/10000 train_time:56759ms step_avg:76.60ms
+[2025-09-05 16:00:03] [Rank 0] step:761/10000 train_time:57415ms step_avg:75.45ms
+[2025-09-05 16:00:03] [Rank 0] step:781/10000 train_time:58068ms step_avg:74.35ms
+[2025-09-05 16:00:04] [Rank 0] step:801/10000 train_time:58719ms step_avg:73.31ms
+[2025-09-05 16:00:05] [Rank 0] step:821/10000 train_time:59479ms step_avg:72.45ms
+[2025-09-05 16:00:06] [Rank 0] step:841/10000 train_time:60499ms step_avg:71.94ms
+[2025-09-05 16:00:06] [Rank 0] step:861/10000 train_time:61153ms step_avg:71.03ms
+[2025-09-05 16:00:07] [Rank 0] step:881/10000 train_time:61805ms step_avg:70.15ms
+[2025-09-05 16:00:08] [Rank 0] step:901/10000 train_time:62458ms step_avg:69.32ms
+[2025-09-05 16:00:08] [Rank 0] step:921/10000 train_time:63110ms step_avg:68.52ms
+[2025-09-05 16:00:09] [Rank 0] step:941/10000 train_time:63762ms step_avg:67.76ms
+[2025-09-05 16:00:10] [Rank 0] step:961/10000 train_time:64414ms step_avg:67.03ms
+[2025-09-05 16:00:10] [Rank 0] step:981/10000 train_time:65067ms step_avg:66.33ms
+[2025-09-05 16:00:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:00:11] [Rank 0] PRINT: step:1000/10000 train_loss:1.2229 val_loss:1.0336 train_time:65952ms step_avg:65.95ms
+[2025-09-05 16:00:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:00:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:01:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:01:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:01:33] [Rank 0] Total Loss: 4.3685
+[2025-09-05 16:01:33] [Rank 0] Total FTA (Unweighted): 0.5356
+[2025-09-05 16:01:33] [Rank 0] Total FTA (Weighted): 0.5356
+[2025-09-05 16:01:34] [Rank 0] Group 0 Loss: 4.0613
+[2025-09-05 16:01:34] [Rank 0] Group 1 Loss: 3.9024
+[2025-09-05 16:01:34] [Rank 0] Group 2 Loss: 3.6658
+[2025-09-05 16:01:34] [Rank 0] Group 3 Loss: 3.9627
+[2025-09-05 16:01:34] [Rank 0] Group 4 Loss: 3.9780
+[2025-09-05 16:01:34] [Rank 0] Group 5 Loss: 4.0299
+[2025-09-05 16:01:34] [Rank 0] Group 6 Loss: 4.0592
+[2025-09-05 16:01:34] [Rank 0] Group 7 Loss: 4.1200
+[2025-09-05 16:01:34] [Rank 0] Group 8 Loss: 4.3337
+[2025-09-05 16:01:34] [Rank 0] Group 9 Loss: 4.4401
+[2025-09-05 16:01:34] [Rank 0] Group 10 Loss: 4.6811
+[2025-09-05 16:01:34] [Rank 0] Group 11 Loss: 4.8105
+[2025-09-05 16:01:34] [Rank 0] Group 12 Loss: 4.9157
+[2025-09-05 16:01:34] [Rank 0] Group 13 Loss: 5.0165
+[2025-09-05 16:01:34] [Rank 0] Group 14 Loss: 4.9536
+[2025-09-05 16:01:34] [Rank 0] Group 15 Loss: 4.9656
+[2025-09-05 16:01:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:01:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:01:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:01:34] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:01:34] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:01:34] [Rank 0] Group 5 FTA: 0.9600
+[2025-09-05 16:01:34] [Rank 0] Group 6 FTA: 0.6800
+[2025-09-05 16:01:34] [Rank 0] Group 7 FTA: 0.6000
+[2025-09-05 16:01:34] [Rank 0] Group 8 FTA: 0.4800
+[2025-09-05 16:01:34] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 16:01:34] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 16:01:34] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 16:01:34] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 16:01:34] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 16:01:34] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:01:34] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:01:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:01:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:01:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:01:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:01:36] [Rank 0] step:1001/10000 train_time:65962ms step_avg:65.90ms
+[2025-09-05 16:01:37] [Rank 0] step:1021/10000 train_time:66405ms step_avg:65.04ms
+[2025-09-05 16:01:38] [Rank 0] step:1041/10000 train_time:67058ms step_avg:64.42ms
+[2025-09-05 16:01:38] [Rank 0] step:1061/10000 train_time:67711ms step_avg:63.82ms
+[2025-09-05 16:01:39] [Rank 0] step:1081/10000 train_time:68364ms step_avg:63.24ms
+[2025-09-05 16:01:40] [Rank 0] step:1101/10000 train_time:69016ms step_avg:62.69ms
+[2025-09-05 16:01:40] [Rank 0] step:1121/10000 train_time:69668ms step_avg:62.15ms
+[2025-09-05 16:01:41] [Rank 0] step:1141/10000 train_time:70321ms step_avg:61.63ms
+[2025-09-05 16:01:42] [Rank 0] step:1161/10000 train_time:70972ms step_avg:61.13ms
+[2025-09-05 16:01:42] [Rank 0] step:1181/10000 train_time:71625ms step_avg:60.65ms
+[2025-09-05 16:01:43] [Rank 0] step:1201/10000 train_time:72278ms step_avg:60.18ms
+[2025-09-05 16:01:44] [Rank 0] step:1221/10000 train_time:72930ms step_avg:59.73ms
+[2025-09-05 16:01:44] [Rank 0] step:1241/10000 train_time:73583ms step_avg:59.29ms
+[2025-09-05 16:01:45] [Rank 0] step:1261/10000 train_time:74234ms step_avg:58.87ms
+[2025-09-05 16:01:46] [Rank 0] step:1281/10000 train_time:74888ms step_avg:58.46ms
+[2025-09-05 16:01:46] [Rank 0] step:1301/10000 train_time:75542ms step_avg:58.06ms
+[2025-09-05 16:01:47] [Rank 0] step:1321/10000 train_time:76195ms step_avg:57.68ms
+[2025-09-05 16:01:48] [Rank 0] step:1341/10000 train_time:77036ms step_avg:57.45ms
+[2025-09-05 16:01:48] [Rank 0] step:1361/10000 train_time:77689ms step_avg:57.08ms
+[2025-09-05 16:01:49] [Rank 0] step:1381/10000 train_time:78342ms step_avg:56.73ms
+[2025-09-05 16:01:50] [Rank 0] step:1401/10000 train_time:79212ms step_avg:56.54ms
+[2025-09-05 16:01:51] [Rank 0] step:1421/10000 train_time:79865ms step_avg:56.20ms
+[2025-09-05 16:01:51] [Rank 0] step:1441/10000 train_time:80518ms step_avg:55.88ms
+[2025-09-05 16:01:52] [Rank 0] step:1461/10000 train_time:81171ms step_avg:55.56ms
+[2025-09-05 16:01:53] [Rank 0] step:1481/10000 train_time:81926ms step_avg:55.32ms
+[2025-09-05 16:01:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:01:54] [Rank 0] PRINT: step:1500/10000 train_loss:0.9710 val_loss:0.9113 train_time:82812ms step_avg:55.21ms
+[2025-09-05 16:01:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:01:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:03:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:03:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:03:18] [Rank 0] Total Loss: 4.7162
+[2025-09-05 16:03:18] [Rank 0] Total FTA (Unweighted): 0.6256
+[2025-09-05 16:03:19] [Rank 0] Total FTA (Weighted): 0.6256
+[2025-09-05 16:03:19] [Rank 0] Group 0 Loss: 4.5593
+[2025-09-05 16:03:19] [Rank 0] Group 1 Loss: 4.0016
+[2025-09-05 16:03:19] [Rank 0] Group 2 Loss: 3.9330
+[2025-09-05 16:03:19] [Rank 0] Group 3 Loss: 4.4738
+[2025-09-05 16:03:19] [Rank 0] Group 4 Loss: 4.4398
+[2025-09-05 16:03:19] [Rank 0] Group 5 Loss: 4.4992
+[2025-09-05 16:03:19] [Rank 0] Group 6 Loss: 4.3707
+[2025-09-05 16:03:19] [Rank 0] Group 7 Loss: 4.4409
+[2025-09-05 16:03:19] [Rank 0] Group 8 Loss: 4.6376
+[2025-09-05 16:03:19] [Rank 0] Group 9 Loss: 4.6549
+[2025-09-05 16:03:19] [Rank 0] Group 10 Loss: 4.9327
+[2025-09-05 16:03:19] [Rank 0] Group 11 Loss: 5.1290
+[2025-09-05 16:03:19] [Rank 0] Group 12 Loss: 5.1154
+[2025-09-05 16:03:19] [Rank 0] Group 13 Loss: 5.4200
+[2025-09-05 16:03:19] [Rank 0] Group 14 Loss: 5.4181
+[2025-09-05 16:03:19] [Rank 0] Group 15 Loss: 5.4324
+[2025-09-05 16:03:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:03:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:03:19] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:03:19] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:03:19] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:03:19] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:03:19] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:03:19] [Rank 0] Group 7 FTA: 0.8600
+[2025-09-05 16:03:19] [Rank 0] Group 8 FTA: 0.7700
+[2025-09-05 16:03:19] [Rank 0] Group 9 FTA: 0.5500
+[2025-09-05 16:03:19] [Rank 0] Group 10 FTA: 0.2500
+[2025-09-05 16:03:19] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-05 16:03:19] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 16:03:19] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 16:03:19] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:03:19] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:03:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:03:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:03:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:03:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:03:21] [Rank 0] step:1501/10000 train_time:82821ms step_avg:55.18ms
+[2025-09-05 16:03:21] [Rank 0] step:1521/10000 train_time:83247ms step_avg:54.73ms
+[2025-09-05 16:03:22] [Rank 0] step:1541/10000 train_time:83900ms step_avg:54.45ms
+[2025-09-05 16:03:23] [Rank 0] step:1561/10000 train_time:84552ms step_avg:54.17ms
+[2025-09-05 16:03:23] [Rank 0] step:1581/10000 train_time:85305ms step_avg:53.96ms
+[2025-09-05 16:03:24] [Rank 0] step:1601/10000 train_time:86057ms step_avg:53.75ms
+[2025-09-05 16:03:25] [Rank 0] step:1621/10000 train_time:86710ms step_avg:53.49ms
+[2025-09-05 16:03:26] [Rank 0] step:1641/10000 train_time:87541ms step_avg:53.35ms
+[2025-09-05 16:03:26] [Rank 0] step:1661/10000 train_time:88193ms step_avg:53.10ms
+[2025-09-05 16:03:27] [Rank 0] step:1681/10000 train_time:88846ms step_avg:52.85ms
+[2025-09-05 16:03:28] [Rank 0] step:1701/10000 train_time:89497ms step_avg:52.61ms
+[2025-09-05 16:03:28] [Rank 0] step:1721/10000 train_time:90149ms step_avg:52.38ms
+[2025-09-05 16:03:29] [Rank 0] step:1741/10000 train_time:90803ms step_avg:52.16ms
+[2025-09-05 16:03:30] [Rank 0] step:1761/10000 train_time:91459ms step_avg:51.94ms
+[2025-09-05 16:03:30] [Rank 0] step:1781/10000 train_time:92117ms step_avg:51.72ms
+[2025-09-05 16:03:31] [Rank 0] step:1801/10000 train_time:92771ms step_avg:51.51ms
+[2025-09-05 16:03:32] [Rank 0] step:1821/10000 train_time:93425ms step_avg:51.30ms
+[2025-09-05 16:03:32] [Rank 0] step:1841/10000 train_time:94078ms step_avg:51.10ms
+[2025-09-05 16:03:33] [Rank 0] step:1861/10000 train_time:94731ms step_avg:50.90ms
+[2025-09-05 16:03:34] [Rank 0] step:1881/10000 train_time:95383ms step_avg:50.71ms
+[2025-09-05 16:03:34] [Rank 0] step:1901/10000 train_time:96036ms step_avg:50.52ms
+[2025-09-05 16:03:35] [Rank 0] step:1921/10000 train_time:96689ms step_avg:50.33ms
+[2025-09-05 16:03:35] [Rank 0] step:1941/10000 train_time:97342ms step_avg:50.15ms
+[2025-09-05 16:03:36] [Rank 0] step:1961/10000 train_time:97993ms step_avg:49.97ms
+[2025-09-05 16:03:37] [Rank 0] step:1981/10000 train_time:98645ms step_avg:49.80ms
+[2025-09-05 16:03:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:03:38] [Rank 0] PRINT: step:2000/10000 train_loss:0.8858 val_loss:0.8479 train_time:99630ms step_avg:49.81ms
+[2025-09-05 16:03:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:03:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:05:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:05:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:05:02] [Rank 0] Total Loss: 4.7512
+[2025-09-05 16:05:02] [Rank 0] Total FTA (Unweighted): 0.6794
+[2025-09-05 16:05:02] [Rank 0] Total FTA (Weighted): 0.6794
+[2025-09-05 16:05:02] [Rank 0] Group 0 Loss: 4.6835
+[2025-09-05 16:05:02] [Rank 0] Group 1 Loss: 4.2231
+[2025-09-05 16:05:02] [Rank 0] Group 2 Loss: 4.0980
+[2025-09-05 16:05:02] [Rank 0] Group 3 Loss: 4.5393
+[2025-09-05 16:05:02] [Rank 0] Group 4 Loss: 4.4710
+[2025-09-05 16:05:02] [Rank 0] Group 5 Loss: 4.5417
+[2025-09-05 16:05:02] [Rank 0] Group 6 Loss: 4.4785
+[2025-09-05 16:05:02] [Rank 0] Group 7 Loss: 4.5377
+[2025-09-05 16:05:02] [Rank 0] Group 8 Loss: 4.6534
+[2025-09-05 16:05:02] [Rank 0] Group 9 Loss: 4.7136
+[2025-09-05 16:05:02] [Rank 0] Group 10 Loss: 4.8350
+[2025-09-05 16:05:02] [Rank 0] Group 11 Loss: 4.9863
+[2025-09-05 16:05:02] [Rank 0] Group 12 Loss: 5.0552
+[2025-09-05 16:05:02] [Rank 0] Group 13 Loss: 5.3563
+[2025-09-05 16:05:02] [Rank 0] Group 14 Loss: 5.3677
+[2025-09-05 16:05:02] [Rank 0] Group 15 Loss: 5.4784
+[2025-09-05 16:05:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:05:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:05:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:05:02] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:05:02] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:05:02] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:05:02] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:05:02] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:05:02] [Rank 0] Group 8 FTA: 0.8700
+[2025-09-05 16:05:02] [Rank 0] Group 9 FTA: 0.7600
+[2025-09-05 16:05:02] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 16:05:02] [Rank 0] Group 11 FTA: 0.2300
+[2025-09-05 16:05:02] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 16:05:02] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 16:05:02] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 16:05:02] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:05:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:05:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:05:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:05:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:05:04] [Rank 0] step:2001/10000 train_time:99639ms step_avg:49.79ms
+[2025-09-05 16:05:05] [Rank 0] step:2021/10000 train_time:100284ms step_avg:49.62ms
+[2025-09-05 16:05:05] [Rank 0] step:2041/10000 train_time:100937ms step_avg:49.45ms
+[2025-09-05 16:05:06] [Rank 0] step:2061/10000 train_time:101590ms step_avg:49.29ms
+[2025-09-05 16:05:07] [Rank 0] step:2081/10000 train_time:102242ms step_avg:49.13ms
+[2025-09-05 16:05:07] [Rank 0] step:2101/10000 train_time:102895ms step_avg:48.97ms
+[2025-09-05 16:05:08] [Rank 0] step:2121/10000 train_time:103548ms step_avg:48.82ms
+[2025-09-05 16:05:09] [Rank 0] step:2141/10000 train_time:104202ms step_avg:48.67ms
+[2025-09-05 16:05:09] [Rank 0] step:2161/10000 train_time:104854ms step_avg:48.52ms
+[2025-09-05 16:05:10] [Rank 0] step:2181/10000 train_time:105507ms step_avg:48.38ms
+[2025-09-05 16:05:11] [Rank 0] step:2201/10000 train_time:106159ms step_avg:48.23ms
+[2025-09-05 16:05:11] [Rank 0] step:2221/10000 train_time:106915ms step_avg:48.14ms
+[2025-09-05 16:05:12] [Rank 0] step:2241/10000 train_time:107570ms step_avg:48.00ms
+[2025-09-05 16:05:13] [Rank 0] step:2261/10000 train_time:108228ms step_avg:47.87ms
+[2025-09-05 16:05:13] [Rank 0] step:2281/10000 train_time:108887ms step_avg:47.74ms
+[2025-09-05 16:05:14] [Rank 0] step:2301/10000 train_time:109546ms step_avg:47.61ms
+[2025-09-05 16:05:15] [Rank 0] step:2321/10000 train_time:110205ms step_avg:47.48ms
+[2025-09-05 16:05:15] [Rank 0] step:2341/10000 train_time:110864ms step_avg:47.36ms
+[2025-09-05 16:05:16] [Rank 0] step:2361/10000 train_time:111522ms step_avg:47.23ms
+[2025-09-05 16:05:17] [Rank 0] step:2381/10000 train_time:112181ms step_avg:47.12ms
+[2025-09-05 16:05:17] [Rank 0] step:2401/10000 train_time:112840ms step_avg:47.00ms
+[2025-09-05 16:05:18] [Rank 0] step:2421/10000 train_time:113497ms step_avg:46.88ms
+[2025-09-05 16:05:19] [Rank 0] step:2441/10000 train_time:114259ms step_avg:46.81ms
+[2025-09-05 16:05:19] [Rank 0] step:2461/10000 train_time:114917ms step_avg:46.70ms
+[2025-09-05 16:05:20] [Rank 0] step:2481/10000 train_time:115575ms step_avg:46.58ms
+[2025-09-05 16:05:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:05:21] [Rank 0] PRINT: step:2500/10000 train_loss:0.8341 val_loss:0.8020 train_time:116569ms step_avg:46.63ms
+[2025-09-05 16:05:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:05:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:06:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:06:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:06:49] [Rank 0] Total Loss: 4.9421
+[2025-09-05 16:06:49] [Rank 0] Total FTA (Unweighted): 0.7131
+[2025-09-05 16:06:49] [Rank 0] Total FTA (Weighted): 0.7131
+[2025-09-05 16:06:49] [Rank 0] Group 0 Loss: 4.8421
+[2025-09-05 16:06:49] [Rank 0] Group 1 Loss: 4.4592
+[2025-09-05 16:06:49] [Rank 0] Group 2 Loss: 4.3335
+[2025-09-05 16:06:49] [Rank 0] Group 3 Loss: 4.6779
+[2025-09-05 16:06:49] [Rank 0] Group 4 Loss: 4.6902
+[2025-09-05 16:06:49] [Rank 0] Group 5 Loss: 4.7630
+[2025-09-05 16:06:49] [Rank 0] Group 6 Loss: 4.7235
+[2025-09-05 16:06:50] [Rank 0] Group 7 Loss: 4.8038
+[2025-09-05 16:06:50] [Rank 0] Group 8 Loss: 4.8747
+[2025-09-05 16:06:50] [Rank 0] Group 9 Loss: 4.9086
+[2025-09-05 16:06:50] [Rank 0] Group 10 Loss: 5.0629
+[2025-09-05 16:06:50] [Rank 0] Group 11 Loss: 5.1510
+[2025-09-05 16:06:50] [Rank 0] Group 12 Loss: 5.2038
+[2025-09-05 16:06:50] [Rank 0] Group 13 Loss: 5.4337
+[2025-09-05 16:06:50] [Rank 0] Group 14 Loss: 5.4920
+[2025-09-05 16:06:50] [Rank 0] Group 15 Loss: 5.6532
+[2025-09-05 16:06:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:06:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:06:50] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:06:50] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:06:50] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:06:50] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:06:50] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:06:50] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:06:50] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-05 16:06:50] [Rank 0] Group 9 FTA: 0.8400
+[2025-09-05 16:06:50] [Rank 0] Group 10 FTA: 0.7400
+[2025-09-05 16:06:50] [Rank 0] Group 11 FTA: 0.3900
+[2025-09-05 16:06:50] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 16:06:50] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 16:06:50] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:06:50] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:06:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:06:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:06:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:06:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:06:51] [Rank 0] step:2501/10000 train_time:116577ms step_avg:46.61ms
+[2025-09-05 16:06:52] [Rank 0] step:2521/10000 train_time:117023ms step_avg:46.42ms
+[2025-09-05 16:06:52] [Rank 0] step:2541/10000 train_time:117681ms step_avg:46.31ms
+[2025-09-05 16:06:53] [Rank 0] step:2561/10000 train_time:118339ms step_avg:46.21ms
+[2025-09-05 16:06:54] [Rank 0] step:2581/10000 train_time:118998ms step_avg:46.11ms
+[2025-09-05 16:06:54] [Rank 0] step:2601/10000 train_time:119658ms step_avg:46.00ms
+[2025-09-05 16:06:55] [Rank 0] step:2621/10000 train_time:120315ms step_avg:45.90ms
+[2025-09-05 16:06:56] [Rank 0] step:2641/10000 train_time:120973ms step_avg:45.81ms
+[2025-09-05 16:06:56] [Rank 0] step:2661/10000 train_time:121632ms step_avg:45.71ms
+[2025-09-05 16:06:57] [Rank 0] step:2681/10000 train_time:122291ms step_avg:45.61ms
+[2025-09-05 16:06:58] [Rank 0] step:2701/10000 train_time:122950ms step_avg:45.52ms
+[2025-09-05 16:06:58] [Rank 0] step:2721/10000 train_time:123609ms step_avg:45.43ms
+[2025-09-05 16:06:59] [Rank 0] step:2741/10000 train_time:124270ms step_avg:45.34ms
+[2025-09-05 16:07:00] [Rank 0] step:2761/10000 train_time:124927ms step_avg:45.25ms
+[2025-09-05 16:07:00] [Rank 0] step:2781/10000 train_time:125586ms step_avg:45.16ms
+[2025-09-05 16:07:01] [Rank 0] step:2801/10000 train_time:126245ms step_avg:45.07ms
+[2025-09-05 16:07:02] [Rank 0] step:2821/10000 train_time:126907ms step_avg:44.99ms
+[2025-09-05 16:07:03] [Rank 0] step:2841/10000 train_time:128016ms step_avg:45.06ms
+[2025-09-05 16:07:04] [Rank 0] step:2861/10000 train_time:128833ms step_avg:45.03ms
+[2025-09-05 16:07:04] [Rank 0] step:2881/10000 train_time:129492ms step_avg:44.95ms
+[2025-09-05 16:07:05] [Rank 0] step:2901/10000 train_time:130151ms step_avg:44.86ms
+[2025-09-05 16:07:06] [Rank 0] step:2921/10000 train_time:130809ms step_avg:44.78ms
+[2025-09-05 16:07:06] [Rank 0] step:2941/10000 train_time:131648ms step_avg:44.76ms
+[2025-09-05 16:07:07] [Rank 0] step:2961/10000 train_time:132307ms step_avg:44.68ms
+[2025-09-05 16:07:08] [Rank 0] step:2981/10000 train_time:133066ms step_avg:44.64ms
+[2025-09-05 16:07:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:07:09] [Rank 0] PRINT: step:3000/10000 train_loss:0.7955 val_loss:0.7714 train_time:133958ms step_avg:44.65ms
+[2025-09-05 16:07:09] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:07:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:08:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:08:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:08:33] [Rank 0] Total Loss: 5.0266
+[2025-09-05 16:08:33] [Rank 0] Total FTA (Unweighted): 0.7475
+[2025-09-05 16:08:33] [Rank 0] Total FTA (Weighted): 0.7475
+[2025-09-05 16:08:33] [Rank 0] Group 0 Loss: 5.0402
+[2025-09-05 16:08:33] [Rank 0] Group 1 Loss: 4.6718
+[2025-09-05 16:08:33] [Rank 0] Group 2 Loss: 4.4596
+[2025-09-05 16:08:33] [Rank 0] Group 3 Loss: 4.7752
+[2025-09-05 16:08:33] [Rank 0] Group 4 Loss: 4.8397
+[2025-09-05 16:08:33] [Rank 0] Group 5 Loss: 4.8936
+[2025-09-05 16:08:33] [Rank 0] Group 6 Loss: 4.7680
+[2025-09-05 16:08:33] [Rank 0] Group 7 Loss: 4.8956
+[2025-09-05 16:08:33] [Rank 0] Group 8 Loss: 5.0178
+[2025-09-05 16:08:33] [Rank 0] Group 9 Loss: 5.0042
+[2025-09-05 16:08:34] [Rank 0] Group 10 Loss: 5.1029
+[2025-09-05 16:08:34] [Rank 0] Group 11 Loss: 5.1579
+[2025-09-05 16:08:34] [Rank 0] Group 12 Loss: 5.2463
+[2025-09-05 16:08:34] [Rank 0] Group 13 Loss: 5.4321
+[2025-09-05 16:08:34] [Rank 0] Group 14 Loss: 5.4711
+[2025-09-05 16:08:34] [Rank 0] Group 15 Loss: 5.6492
+[2025-09-05 16:08:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:08:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:08:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:08:34] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:08:34] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:08:34] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:08:34] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:08:34] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:08:34] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-05 16:08:34] [Rank 0] Group 9 FTA: 0.9200
+[2025-09-05 16:08:34] [Rank 0] Group 10 FTA: 0.9400
+[2025-09-05 16:08:34] [Rank 0] Group 11 FTA: 0.5300
+[2025-09-05 16:08:34] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 16:08:35] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 16:08:35] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:08:35] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:08:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:08:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:08:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:08:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:08:36] [Rank 0] step:3001/10000 train_time:133966ms step_avg:44.64ms
+[2025-09-05 16:08:37] [Rank 0] step:3021/10000 train_time:134419ms step_avg:44.49ms
+[2025-09-05 16:08:38] [Rank 0] step:3041/10000 train_time:135080ms step_avg:44.42ms
+[2025-09-05 16:08:38] [Rank 0] step:3061/10000 train_time:135741ms step_avg:44.35ms
+[2025-09-05 16:08:39] [Rank 0] step:3081/10000 train_time:136399ms step_avg:44.27ms
+[2025-09-05 16:08:39] [Rank 0] step:3101/10000 train_time:137057ms step_avg:44.20ms
+[2025-09-05 16:08:40] [Rank 0] step:3121/10000 train_time:137819ms step_avg:44.16ms
+[2025-09-05 16:08:41] [Rank 0] step:3141/10000 train_time:138478ms step_avg:44.09ms
+[2025-09-05 16:08:42] [Rank 0] step:3161/10000 train_time:139137ms step_avg:44.02ms
+[2025-09-05 16:08:42] [Rank 0] step:3181/10000 train_time:139795ms step_avg:43.95ms
+[2025-09-05 16:08:43] [Rank 0] step:3201/10000 train_time:140454ms step_avg:43.88ms
+[2025-09-05 16:08:44] [Rank 0] step:3221/10000 train_time:141215ms step_avg:43.84ms
+[2025-09-05 16:08:44] [Rank 0] step:3241/10000 train_time:141873ms step_avg:43.77ms
+[2025-09-05 16:08:45] [Rank 0] step:3261/10000 train_time:142532ms step_avg:43.71ms
+[2025-09-05 16:08:46] [Rank 0] step:3281/10000 train_time:143191ms step_avg:43.64ms
+[2025-09-05 16:08:46] [Rank 0] step:3301/10000 train_time:143850ms step_avg:43.58ms
+[2025-09-05 16:08:47] [Rank 0] step:3321/10000 train_time:144610ms step_avg:43.54ms
+[2025-09-05 16:08:48] [Rank 0] step:3341/10000 train_time:145269ms step_avg:43.48ms
+[2025-09-05 16:08:48] [Rank 0] step:3361/10000 train_time:145928ms step_avg:43.42ms
+[2025-09-05 16:08:49] [Rank 0] step:3381/10000 train_time:146587ms step_avg:43.36ms
+[2025-09-05 16:08:50] [Rank 0] step:3401/10000 train_time:147245ms step_avg:43.29ms
+[2025-09-05 16:08:50] [Rank 0] step:3421/10000 train_time:147903ms step_avg:43.23ms
+[2025-09-05 16:08:51] [Rank 0] step:3441/10000 train_time:148562ms step_avg:43.17ms
+[2025-09-05 16:08:52] [Rank 0] step:3461/10000 train_time:149223ms step_avg:43.12ms
+[2025-09-05 16:08:52] [Rank 0] step:3481/10000 train_time:149883ms step_avg:43.06ms
+[2025-09-05 16:08:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:08:53] [Rank 0] PRINT: step:3500/10000 train_loss:0.7677 val_loss:0.7469 train_time:150776ms step_avg:43.08ms
+[2025-09-05 16:08:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:08:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:10:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:10:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:10:21] [Rank 0] Total Loss: 4.9865
+[2025-09-05 16:10:21] [Rank 0] Total FTA (Unweighted): 0.7669
+[2025-09-05 16:10:21] [Rank 0] Total FTA (Weighted): 0.7669
+[2025-09-05 16:10:21] [Rank 0] Group 0 Loss: 5.0308
+[2025-09-05 16:10:21] [Rank 0] Group 1 Loss: 4.6643
+[2025-09-05 16:10:21] [Rank 0] Group 2 Loss: 4.2933
+[2025-09-05 16:10:21] [Rank 0] Group 3 Loss: 4.7613
+[2025-09-05 16:10:21] [Rank 0] Group 4 Loss: 4.8258
+[2025-09-05 16:10:21] [Rank 0] Group 5 Loss: 4.8705
+[2025-09-05 16:10:21] [Rank 0] Group 6 Loss: 4.8174
+[2025-09-05 16:10:21] [Rank 0] Group 7 Loss: 4.8546
+[2025-09-05 16:10:21] [Rank 0] Group 8 Loss: 4.9805
+[2025-09-05 16:10:21] [Rank 0] Group 9 Loss: 4.9848
+[2025-09-05 16:10:21] [Rank 0] Group 10 Loss: 5.0982
+[2025-09-05 16:10:21] [Rank 0] Group 11 Loss: 5.1179
+[2025-09-05 16:10:21] [Rank 0] Group 12 Loss: 5.1911
+[2025-09-05 16:10:21] [Rank 0] Group 13 Loss: 5.3692
+[2025-09-05 16:10:22] [Rank 0] Group 14 Loss: 5.3619
+[2025-09-05 16:10:22] [Rank 0] Group 15 Loss: 5.5617
+[2025-09-05 16:10:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:10:22] [Rank 0] Group 9 FTA: 0.9800
+[2025-09-05 16:10:22] [Rank 0] Group 10 FTA: 0.9600
+[2025-09-05 16:10:22] [Rank 0] Group 11 FTA: 0.7000
+[2025-09-05 16:10:22] [Rank 0] Group 12 FTA: 0.2700
+[2025-09-05 16:10:22] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 16:10:22] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 16:10:22] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 16:10:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:10:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:10:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:10:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:10:23] [Rank 0] step:3501/10000 train_time:150785ms step_avg:43.07ms
+[2025-09-05 16:10:24] [Rank 0] step:3521/10000 train_time:151235ms step_avg:42.95ms
+[2025-09-05 16:10:25] [Rank 0] step:3541/10000 train_time:151892ms step_avg:42.90ms
+[2025-09-05 16:10:25] [Rank 0] step:3561/10000 train_time:152651ms step_avg:42.87ms
+[2025-09-05 16:10:26] [Rank 0] step:3581/10000 train_time:153311ms step_avg:42.81ms
+[2025-09-05 16:10:27] [Rank 0] step:3601/10000 train_time:153969ms step_avg:42.76ms
+[2025-09-05 16:10:27] [Rank 0] step:3621/10000 train_time:154629ms step_avg:42.70ms
+[2025-09-05 16:10:28] [Rank 0] step:3641/10000 train_time:155287ms step_avg:42.65ms
+[2025-09-05 16:10:29] [Rank 0] step:3661/10000 train_time:155945ms step_avg:42.60ms
+[2025-09-05 16:10:29] [Rank 0] step:3681/10000 train_time:156604ms step_avg:42.54ms
+[2025-09-05 16:10:30] [Rank 0] step:3701/10000 train_time:157364ms step_avg:42.52ms
+[2025-09-05 16:10:31] [Rank 0] step:3721/10000 train_time:158023ms step_avg:42.47ms
+[2025-09-05 16:10:31] [Rank 0] step:3741/10000 train_time:158681ms step_avg:42.42ms
+[2025-09-05 16:10:32] [Rank 0] step:3761/10000 train_time:159339ms step_avg:42.37ms
+[2025-09-05 16:10:33] [Rank 0] step:3781/10000 train_time:159997ms step_avg:42.32ms
+[2025-09-05 16:10:33] [Rank 0] step:3801/10000 train_time:160656ms step_avg:42.27ms
+[2025-09-05 16:10:34] [Rank 0] step:3821/10000 train_time:161415ms step_avg:42.24ms
+[2025-09-05 16:10:35] [Rank 0] step:3841/10000 train_time:162075ms step_avg:42.20ms
+[2025-09-05 16:10:35] [Rank 0] step:3861/10000 train_time:162732ms step_avg:42.15ms
+[2025-09-05 16:10:36] [Rank 0] step:3881/10000 train_time:163392ms step_avg:42.10ms
+[2025-09-05 16:10:37] [Rank 0] step:3901/10000 train_time:164050ms step_avg:42.05ms
+[2025-09-05 16:10:37] [Rank 0] step:3921/10000 train_time:164709ms step_avg:42.01ms
+[2025-09-05 16:10:38] [Rank 0] step:3941/10000 train_time:165367ms step_avg:41.96ms
+[2025-09-05 16:10:39] [Rank 0] step:3961/10000 train_time:166024ms step_avg:41.91ms
+[2025-09-05 16:10:39] [Rank 0] step:3981/10000 train_time:166684ms step_avg:41.87ms
+[2025-09-05 16:10:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:10:41] [Rank 0] PRINT: step:4000/10000 train_loss:0.7466 val_loss:0.7297 train_time:167677ms step_avg:41.92ms
+[2025-09-05 16:10:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:10:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:12:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:12:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:12:03] [Rank 0] Total Loss: 4.9759
+[2025-09-05 16:12:03] [Rank 0] Total FTA (Unweighted): 0.7837
+[2025-09-05 16:12:03] [Rank 0] Total FTA (Weighted): 0.7837
+[2025-09-05 16:12:03] [Rank 0] Group 0 Loss: 5.1302
+[2025-09-05 16:12:03] [Rank 0] Group 1 Loss: 4.3671
+[2025-09-05 16:12:03] [Rank 0] Group 2 Loss: 4.4649
+[2025-09-05 16:12:03] [Rank 0] Group 3 Loss: 4.8329
+[2025-09-05 16:12:03] [Rank 0] Group 4 Loss: 4.8759
+[2025-09-05 16:12:03] [Rank 0] Group 5 Loss: 4.9132
+[2025-09-05 16:12:03] [Rank 0] Group 6 Loss: 4.8471
+[2025-09-05 16:12:03] [Rank 0] Group 7 Loss: 4.8973
+[2025-09-05 16:12:03] [Rank 0] Group 8 Loss: 4.9591
+[2025-09-05 16:12:03] [Rank 0] Group 9 Loss: 4.9877
+[2025-09-05 16:12:03] [Rank 0] Group 10 Loss: 5.0839
+[2025-09-05 16:12:03] [Rank 0] Group 11 Loss: 5.0579
+[2025-09-05 16:12:03] [Rank 0] Group 12 Loss: 5.1229
+[2025-09-05 16:12:03] [Rank 0] Group 13 Loss: 5.2890
+[2025-09-05 16:12:03] [Rank 0] Group 14 Loss: 5.3203
+[2025-09-05 16:12:03] [Rank 0] Group 15 Loss: 5.4649
+[2025-09-05 16:12:03] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:12:03] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:12:03] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:12:03] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:12:03] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:12:03] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:12:03] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:12:04] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:12:04] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:12:04] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:12:04] [Rank 0] Group 10 FTA: 0.9500
+[2025-09-05 16:12:04] [Rank 0] Group 11 FTA: 0.8500
+[2025-09-05 16:12:04] [Rank 0] Group 12 FTA: 0.3700
+[2025-09-05 16:12:04] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 16:12:04] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 16:12:04] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 16:12:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:12:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:12:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:12:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:12:05] [Rank 0] step:4001/10000 train_time:167685ms step_avg:41.91ms
+[2025-09-05 16:12:06] [Rank 0] step:4021/10000 train_time:168236ms step_avg:41.84ms
+[2025-09-05 16:12:07] [Rank 0] step:4041/10000 train_time:168894ms step_avg:41.80ms
+[2025-09-05 16:12:07] [Rank 0] step:4061/10000 train_time:169553ms step_avg:41.75ms
+[2025-09-05 16:12:08] [Rank 0] step:4081/10000 train_time:170211ms step_avg:41.71ms
+[2025-09-05 16:12:09] [Rank 0] step:4101/10000 train_time:170870ms step_avg:41.67ms
+[2025-09-05 16:12:09] [Rank 0] step:4121/10000 train_time:171635ms step_avg:41.65ms
+[2025-09-05 16:12:10] [Rank 0] step:4141/10000 train_time:172295ms step_avg:41.61ms
+[2025-09-05 16:12:11] [Rank 0] step:4161/10000 train_time:172956ms step_avg:41.57ms
+[2025-09-05 16:12:11] [Rank 0] step:4181/10000 train_time:173616ms step_avg:41.53ms
+[2025-09-05 16:12:12] [Rank 0] step:4201/10000 train_time:174276ms step_avg:41.48ms
+[2025-09-05 16:12:13] [Rank 0] step:4221/10000 train_time:174935ms step_avg:41.44ms
+[2025-09-05 16:12:13] [Rank 0] step:4241/10000 train_time:175593ms step_avg:41.40ms
+[2025-09-05 16:12:14] [Rank 0] step:4261/10000 train_time:176252ms step_avg:41.36ms
+[2025-09-05 16:12:15] [Rank 0] step:4281/10000 train_time:176910ms step_avg:41.32ms
+[2025-09-05 16:12:15] [Rank 0] step:4301/10000 train_time:177568ms step_avg:41.29ms
+[2025-09-05 16:12:16] [Rank 0] step:4321/10000 train_time:178227ms step_avg:41.25ms
+[2025-09-05 16:12:17] [Rank 0] step:4341/10000 train_time:178887ms step_avg:41.21ms
+[2025-09-05 16:12:17] [Rank 0] step:4361/10000 train_time:179545ms step_avg:41.17ms
+[2025-09-05 16:12:18] [Rank 0] step:4381/10000 train_time:180204ms step_avg:41.13ms
+[2025-09-05 16:12:19] [Rank 0] step:4401/10000 train_time:180967ms step_avg:41.12ms
+[2025-09-05 16:12:19] [Rank 0] step:4421/10000 train_time:181626ms step_avg:41.08ms
+[2025-09-05 16:12:20] [Rank 0] step:4441/10000 train_time:182457ms step_avg:41.08ms
+[2025-09-05 16:12:21] [Rank 0] step:4461/10000 train_time:183118ms step_avg:41.05ms
+[2025-09-05 16:12:21] [Rank 0] step:4481/10000 train_time:183777ms step_avg:41.01ms
+[2025-09-05 16:12:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:12:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 16:12:23] [Rank 0] PRINT: step:4500/10000 train_loss:0.7301 val_loss:0.7151 train_time:184830ms step_avg:41.07ms +[2025-09-05 16:12:23] [Rank 0] PRINT: step:4500/10000 train_loss:0.7301 val_loss:0.7151 train_time:184830ms step_avg:41.07ms +[2025-09-05 16:12:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:12:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:12:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:12:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:13:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 16:13:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 16:13:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 16:13:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 16:13:44] [Rank 0] Total Loss: 4.9877 +[2025-09-05 16:13:44] [Rank 0] Total Loss: 4.9877 +[2025-09-05 16:13:44] [Rank 0] Total FTA (Unweighted): 0.7894 +[2025-09-05 16:13:44] [Rank 0] Total FTA (Unweighted): 0.7894 +[2025-09-05 16:13:44] [Rank 0] Total FTA (Weighted): 0.7894 +[2025-09-05 16:13:44] [Rank 0] Total FTA (Weighted): 0.7894 +[2025-09-05 16:13:44] [Rank 0] Group 0 Loss: 5.1597 +[2025-09-05 16:13:44] [Rank 0] Group 0 Loss: 5.1597 +[2025-09-05 16:13:44] [Rank 0] Group 1 Loss: 4.3194 +[2025-09-05 16:13:44] [Rank 0] Group 1 Loss: 4.3194 +[2025-09-05 16:13:44] [Rank 0] Group 2 Loss: 4.5159 +[2025-09-05 16:13:44] [Rank 0] Group 2 Loss: 4.5159 +[2025-09-05 16:13:44] [Rank 0] Group 3 Loss: 4.8385 +[2025-09-05 16:13:44] [Rank 0] Group 3 Loss: 4.8385 +[2025-09-05 16:13:44] [Rank 0] Group 4 Loss: 4.8842 +[2025-09-05 16:13:44] [Rank 0] Group 4 Loss: 4.8842 +[2025-09-05 16:13:44] [Rank 0] Group 5 Loss: 4.9449 +[2025-09-05 16:13:44] [Rank 0] Group 5 Loss: 4.9449 +[2025-09-05 16:13:44] [Rank 0] Group 6 Loss: 4.8367 +[2025-09-05 16:13:44] [Rank 0] Group 6 Loss: 4.8367 +[2025-09-05 16:13:44] [Rank 0] Group 7 Loss: 4.9106 +[2025-09-05 16:13:44] [Rank 0] Group 7 Loss: 4.9106 +[2025-09-05 16:13:44] [Rank 0] Group 8 Loss: 5.0057 +[2025-09-05 16:13:44] [Rank 0] Group 8 Loss: 5.0057 +[2025-09-05 16:13:44] [Rank 0] Group 9 Loss: 5.0320 +[2025-09-05 16:13:44] [Rank 0] Group 9 Loss: 5.0320 +[2025-09-05 16:13:44] [Rank 0] Group 10 Loss: 5.1328 +[2025-09-05 16:13:44] [Rank 0] Group 10 Loss: 5.1328 +[2025-09-05 16:13:44] [Rank 0] Group 11 Loss: 5.1024 +[2025-09-05 16:13:44] [Rank 0] Group 11 Loss: 5.1024 +[2025-09-05 16:13:44] [Rank 0] Group 12 Loss: 5.1466 +[2025-09-05 16:13:44] [Rank 0] Group 12 Loss: 5.1466 +[2025-09-05 16:13:44] [Rank 0] Group 13 Loss: 5.2645 +[2025-09-05 16:13:44] [Rank 0] Group 13 Loss: 5.2645 +[2025-09-05 16:13:44] [Rank 0] Group 14 Loss: 5.2846 +[2025-09-05 16:13:44] [Rank 0] Group 14 Loss: 5.2846 +[2025-09-05 16:13:44] [Rank 0] Group 15 Loss: 5.4250 +[2025-09-05 16:13:44] [Rank 0] Group 15 Loss: 5.4250 +[2025-09-05 16:13:44] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 16:13:44] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 16:13:44] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 16:13:44] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 16:13:44] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 16:13:44] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 16:13:44] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 16:13:44] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 16:13:44] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 16:13:44] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:13:44] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:13:44] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:13:44] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:13:44] [Rank 0] Group 9 FTA: 0.9900
+[2025-09-05 16:13:44] [Rank 0] Group 10 FTA: 0.9700
+[2025-09-05 16:13:44] [Rank 0] Group 11 FTA: 0.9200
+[2025-09-05 16:13:44] [Rank 0] Group 12 FTA: 0.4600
+[2025-09-05 16:13:44] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 16:13:44] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:13:44] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 16:13:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:13:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:13:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:13:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:13:46] [Rank 0] step:4501/10000 train_time:184839ms step_avg:41.07ms
+[2025-09-05 16:13:47] [Rank 0] step:4521/10000 train_time:185288ms step_avg:40.98ms
+[2025-09-05 16:13:47] [Rank 0] step:4541/10000 train_time:185945ms step_avg:40.95ms
+[2025-09-05 16:13:48] [Rank 0] step:4561/10000 train_time:186603ms step_avg:40.91ms
+[2025-09-05 16:13:49] [Rank 0] step:4581/10000 train_time:187262ms step_avg:40.88ms
+[2025-09-05 16:13:49] [Rank 0] step:4601/10000 train_time:187921ms step_avg:40.84ms
+[2025-09-05 16:13:50] [Rank 0] step:4621/10000 train_time:188579ms step_avg:40.81ms
+[2025-09-05 16:13:51] [Rank 0] step:4641/10000 train_time:189237ms step_avg:40.78ms
+[2025-09-05 16:13:51] [Rank 0] step:4661/10000 train_time:189896ms step_avg:40.74ms
+[2025-09-05 16:13:52] [Rank 0] step:4681/10000 train_time:190555ms step_avg:40.71ms
+[2025-09-05 16:13:53] [Rank 0] step:4701/10000 train_time:191213ms step_avg:40.67ms
+[2025-09-05 16:13:53] [Rank 0] step:4721/10000 train_time:191871ms step_avg:40.64ms
+[2025-09-05 16:13:54] [Rank 0] step:4741/10000 train_time:192529ms step_avg:40.61ms
+[2025-09-05 16:13:55] [Rank 0] step:4761/10000 train_time:193187ms step_avg:40.58ms
+[2025-09-05 16:13:55] [Rank 0] step:4781/10000 train_time:193845ms step_avg:40.54ms
+[2025-09-05 16:13:56] [Rank 0] step:4801/10000 train_time:194503ms step_avg:40.51ms
+[2025-09-05 16:13:56] [Rank 0] step:4821/10000 train_time:195161ms step_avg:40.48ms
+[2025-09-05 16:13:57] [Rank 0] step:4841/10000 train_time:196130ms step_avg:40.51ms
+[2025-09-05 16:13:58] [Rank 0] step:4861/10000 train_time:196788ms step_avg:40.48ms
+[2025-09-05 16:13:59] [Rank 0] step:4881/10000 train_time:197448ms step_avg:40.45ms
+[2025-09-05 16:13:59] [Rank 0] step:4901/10000 train_time:198107ms step_avg:40.42ms
+[2025-09-05 16:14:00] [Rank 0] step:4921/10000 train_time:198765ms step_avg:40.39ms
+[2025-09-05 16:14:01] [Rank 0] step:4941/10000 train_time:199423ms step_avg:40.36ms
+[2025-09-05 16:14:01] [Rank 0] step:4961/10000 train_time:200082ms step_avg:40.33ms
+[2025-09-05 16:14:02] [Rank 0] step:4981/10000 train_time:200740ms step_avg:40.30ms
+[2025-09-05 16:14:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:14:03] [Rank 0] PRINT: step:5000/10000 train_loss:0.7169 val_loss:0.7040 train_time:201633ms step_avg:40.33ms
+[2025-09-05 16:14:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:14:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:15:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:15:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:15:26] [Rank 0] Total Loss: 4.9665
+[2025-09-05 16:15:26] [Rank 0] Total FTA (Unweighted): 0.8075
+[2025-09-05 16:15:26] [Rank 0] Total FTA (Weighted): 0.8075
+[2025-09-05 16:15:26] [Rank 0] Group 0 Loss: 5.1680
+[2025-09-05 16:15:26] [Rank 0] Group 1 Loss: 4.4288
+[2025-09-05 16:15:26] [Rank 0] Group 2 Loss: 4.5431
+[2025-09-05 16:15:26] [Rank 0] Group 3 Loss: 4.8355
+[2025-09-05 16:15:26] [Rank 0] Group 4 Loss: 4.8598
+[2025-09-05 16:15:26] [Rank 0] Group 5 Loss: 4.9381
+[2025-09-05 16:15:27] [Rank 0] Group 6 Loss: 4.8787
+[2025-09-05 16:15:27] [Rank 0] Group 7 Loss: 4.9352
+[2025-09-05 16:15:27] [Rank 0] Group 8 Loss: 4.9812
+[2025-09-05 16:15:27] [Rank 0] Group 9 Loss: 5.0286
+[2025-09-05 16:15:27] [Rank 0] Group 10 Loss: 5.1275
+[2025-09-05 16:15:27] [Rank 0] Group 11 Loss: 5.1060
+[2025-09-05 16:15:27] [Rank 0] Group 12 Loss: 5.0382
+[2025-09-05 16:15:27] [Rank 0] Group 13 Loss: 5.1646
+[2025-09-05 16:15:27] [Rank 0] Group 14 Loss: 5.1628
+[2025-09-05 16:15:27] [Rank 0] Group 15 Loss: 5.2679
+[2025-09-05 16:15:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:15:27] [Rank 0] Group 11 FTA: 0.9500
+[2025-09-05 16:15:27] [Rank 0] Group 12 FTA: 0.5700
+[2025-09-05 16:15:28] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 16:15:28] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:15:28] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:15:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:15:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:15:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:15:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:15:29] [Rank 0] step:5001/10000 train_time:201642ms step_avg:40.32ms
+[2025-09-05 16:15:30] [Rank 0] step:5021/10000 train_time:202079ms step_avg:40.25ms
+[2025-09-05 16:15:30] [Rank 0] step:5041/10000 train_time:202740ms step_avg:40.22ms
+[2025-09-05 16:15:31] [Rank 0] step:5061/10000 train_time:203401ms step_avg:40.19ms
+[2025-09-05 16:15:32] [Rank 0] step:5081/10000 train_time:204207ms step_avg:40.19ms
+[2025-09-05 16:15:32] [Rank 0] step:5101/10000 train_time:204867ms step_avg:40.16ms
+[2025-09-05 16:15:33] [Rank 0] step:5121/10000 train_time:205528ms step_avg:40.13ms
+[2025-09-05 16:15:34] [Rank 0] step:5141/10000 train_time:206190ms step_avg:40.11ms
+[2025-09-05 16:15:34] [Rank 0] step:5161/10000 train_time:206849ms step_avg:40.08ms
+[2025-09-05 16:15:35] [Rank 0] step:5181/10000 train_time:207610ms step_avg:40.07ms
+[2025-09-05 16:15:36] [Rank 0] step:5201/10000 train_time:208270ms step_avg:40.04ms
+[2025-09-05 16:15:36] [Rank 0] step:5221/10000 train_time:208929ms step_avg:40.02ms
+[2025-09-05 16:15:37] [Rank 0] step:5241/10000 train_time:209589ms step_avg:39.99ms
+[2025-09-05 16:15:38] [Rank 0] step:5261/10000 train_time:210247ms step_avg:39.96ms
+[2025-09-05 16:15:38] [Rank 0] step:5281/10000 train_time:210907ms step_avg:39.94ms
+[2025-09-05 16:15:39] [Rank 0] step:5301/10000 train_time:211666ms step_avg:39.93ms
+[2025-09-05 16:15:40] [Rank 0] step:5321/10000 train_time:212325ms step_avg:39.90ms
+[2025-09-05 16:15:41] [Rank 0] step:5341/10000 train_time:212983ms step_avg:39.88ms
+[2025-09-05 16:15:41] [Rank 0] step:5361/10000 train_time:213642ms step_avg:39.85ms
+[2025-09-05 16:15:42] [Rank 0] step:5381/10000 train_time:214301ms step_avg:39.83ms
+[2025-09-05 16:15:43] [Rank 0] step:5401/10000 train_time:214959ms step_avg:39.80ms
+[2025-09-05 16:15:43] [Rank 0] step:5421/10000 train_time:215619ms step_avg:39.77ms
+[2025-09-05 16:15:44] [Rank 0] step:5441/10000 train_time:216278ms step_avg:39.75ms
+[2025-09-05 16:15:44] [Rank 0] step:5461/10000 train_time:216937ms step_avg:39.72ms
+[2025-09-05 16:15:45] [Rank 0] step:5481/10000 train_time:217596ms step_avg:39.70ms
+[2025-09-05 16:15:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:15:46] [Rank 0] PRINT: step:5500/10000 train_loss:0.7061 val_loss:0.6954 train_time:218490ms step_avg:39.73ms
+[2025-09-05 16:15:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:15:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:17:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:17:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:17:09] [Rank 0] Total Loss: 5.0593
+[2025-09-05 16:17:09] [Rank 0] Total FTA (Unweighted): 0.8219
+[2025-09-05 16:17:09] [Rank 0] Total FTA (Weighted): 0.8219
+[2025-09-05 16:17:09] [Rank 0] Group 0 Loss: 5.2584
+[2025-09-05 16:17:09] [Rank 0] Group 1 Loss: 4.5349
+[2025-09-05 16:17:09] [Rank 0] Group 2 Loss: 4.5781
+[2025-09-05 16:17:10] [Rank 0] Group 3 Loss: 4.8961
+[2025-09-05 16:17:10] [Rank 0] Group 4 Loss: 4.8945
+[2025-09-05 16:17:10] [Rank 0] Group 5 Loss: 5.0256
+[2025-09-05 16:17:10] [Rank 0] Group 6 Loss: 4.9426
+[2025-09-05 16:17:10] [Rank 0] Group 7 Loss: 4.9974
+[2025-09-05 16:17:10] [Rank 0] Group 8 Loss: 5.0640
+[2025-09-05 16:17:10] [Rank 0] Group 9 Loss: 5.1332
+[2025-09-05 16:17:10] [Rank 0] Group 10 Loss: 5.2262
+[2025-09-05 16:17:10] [Rank 0] Group 11 Loss: 5.1475
+[2025-09-05 16:17:10] [Rank 0] Group 12 Loss: 5.1699
+[2025-09-05 16:17:10] [Rank 0] Group 13 Loss: 5.3720
+[2025-09-05 16:17:10] [Rank 0] Group 14 Loss: 5.3161
+[2025-09-05 16:17:10] [Rank 0] Group 15 Loss: 5.3929
+[2025-09-05 16:17:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:17:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:17:10] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:17:10] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:17:10] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:17:10] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:17:10] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:17:11] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:17:11] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:17:11] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:17:11] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 16:17:11] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-05 16:17:11] [Rank 0] Group 12 FTA: 0.8000
+[2025-09-05 16:17:11] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 16:17:11] [Rank 0] Group 14 FTA: 0.0900
+[2025-09-05 16:17:11] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 16:17:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:17:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:17:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:17:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:17:12] [Rank 0] step:5501/10000 train_time:218499ms step_avg:39.72ms
+[2025-09-05 16:17:13] [Rank 0] step:5521/10000 train_time:218946ms step_avg:39.66ms
+[2025-09-05 16:17:14] [Rank 0] step:5541/10000 train_time:219603ms step_avg:39.63ms
+[2025-09-05 16:17:14] [Rank 0] step:5561/10000 train_time:220262ms step_avg:39.61ms
+[2025-09-05 16:17:15] [Rank 0] step:5581/10000 train_time:220921ms step_avg:39.58ms
+[2025-09-05 16:17:16] [Rank 0] step:5601/10000 train_time:221683ms step_avg:39.58ms
+[2025-09-05 16:17:17] [Rank 0] step:5621/10000 train_time:222443ms step_avg:39.57ms
+[2025-09-05 16:17:18] [Rank 0] step:5641/10000 train_time:223103ms step_avg:39.55ms
+[2025-09-05 16:17:18] [Rank 0] step:5661/10000 train_time:224220ms step_avg:39.61ms
+[2025-09-05 16:17:19] [Rank 0] step:5681/10000 train_time:224878ms step_avg:39.58ms
+[2025-09-05 16:17:20] [Rank 0] step:5701/10000 train_time:225537ms step_avg:39.56ms
+[2025-09-05 16:17:20] [Rank 0] step:5721/10000 train_time:226195ms step_avg:39.54ms
+[2025-09-05 16:17:21] [Rank 0] step:5741/10000 train_time:226855ms step_avg:39.51ms
+[2025-09-05 16:17:22] [Rank 0] step:5761/10000 train_time:227514ms step_avg:39.49ms
+[2025-09-05 16:17:22] [Rank 0] step:5781/10000 train_time:228173ms step_avg:39.47ms
+[2025-09-05 16:17:23] [Rank 0] step:5801/10000 train_time:228831ms step_avg:39.45ms
+[2025-09-05 16:17:24] [Rank 0] step:5821/10000 train_time:229490ms step_avg:39.42ms
+[2025-09-05 16:17:24] [Rank 0] step:5841/10000 train_time:230149ms step_avg:39.40ms
+[2025-09-05 16:17:25] [Rank 0] step:5861/10000 train_time:230806ms step_avg:39.38ms
+[2025-09-05 16:17:26] [Rank 0] step:5881/10000 train_time:231466ms step_avg:39.36ms
+[2025-09-05 16:17:26] [Rank 0] step:5901/10000 train_time:232125ms step_avg:39.34ms
+[2025-09-05 16:17:27] [Rank 0] step:5921/10000 train_time:232784ms step_avg:39.31ms
+[2025-09-05 16:17:28] [Rank 0] step:5941/10000 train_time:233442ms step_avg:39.29ms
+[2025-09-05 16:17:28] [Rank 0] step:5961/10000 train_time:234101ms step_avg:39.27ms
+[2025-09-05 16:17:29] [Rank 0] step:5981/10000 train_time:234761ms step_avg:39.25ms
+[2025-09-05 16:17:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:17:30] [Rank 0] PRINT: step:6000/10000 train_loss:0.6972 val_loss:0.6870 train_time:235654ms step_avg:39.28ms
+[2025-09-05 16:17:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:17:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:18:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:18:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:18:51] [Rank 0] Total Loss: 5.0644
+[2025-09-05 16:18:51] [Rank 0] Total FTA (Unweighted): 0.8331
+[2025-09-05 16:18:51] [Rank 0] Total FTA (Weighted): 0.8331
+[2025-09-05 16:18:51] [Rank 0] Group 0 Loss: 5.1379
+[2025-09-05 16:18:51] [Rank 0] Group 1 Loss: 4.4884
+[2025-09-05 16:18:51] [Rank 0] Group 2 Loss: 4.7571
+[2025-09-05 16:18:51] [Rank 0] Group 3 Loss: 4.9129
+[2025-09-05 16:18:51] [Rank 0] Group 4 Loss: 4.9090
+[2025-09-05 16:18:51] [Rank 0] Group 5 Loss: 5.0965
+[2025-09-05 16:18:51] [Rank 0] Group 6 Loss: 4.9580
+[2025-09-05 16:18:51] [Rank 0] Group 7 Loss: 5.0261
+[2025-09-05 16:18:51] [Rank 0] Group 8 Loss: 5.1222
+[2025-09-05 16:18:51] [Rank 0] Group 9 Loss: 5.1277
+[2025-09-05 16:18:51] [Rank 0] Group 10 Loss: 5.2150
+[2025-09-05 16:18:51] [Rank 0] Group 11 Loss: 5.1340
+[2025-09-05 16:18:51] [Rank 0] Group 12 Loss: 5.2068
+[2025-09-05 16:18:51] [Rank 0] Group 13 Loss: 5.2875
+[2025-09-05 16:18:51] [Rank 0] Group 14 Loss: 5.2686
+[2025-09-05 16:18:51] [Rank 0] Group 15 Loss: 5.3829
+[2025-09-05 16:18:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:18:51] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 16:18:51] [Rank 0] Group 12 FTA: 0.8500
+[2025-09-05 16:18:51] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 16:18:51] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 16:18:51] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:18:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:18:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:18:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:18:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:18:53] [Rank 0] step:6001/10000 train_time:235664ms step_avg:39.27ms
+[2025-09-05 16:18:54] [Rank 0] step:6021/10000 train_time:236617ms step_avg:39.30ms
+[2025-09-05 16:18:54] [Rank 0] step:6041/10000 train_time:237239ms step_avg:39.27ms
+[2025-09-05 16:18:55] [Rank 0] step:6061/10000 train_time:237899ms step_avg:39.25ms
+[2025-09-05 16:18:56] [Rank 0] step:6081/10000 train_time:238559ms step_avg:39.23ms
+[2025-09-05 16:18:56] [Rank 0] step:6101/10000 train_time:239217ms step_avg:39.21ms
+[2025-09-05 16:18:57] [Rank 0] step:6121/10000 train_time:239875ms step_avg:39.19ms
+[2025-09-05 16:18:58] [Rank 0] step:6141/10000 train_time:240534ms step_avg:39.17ms
+[2025-09-05 16:18:58] [Rank 0] step:6161/10000 train_time:241195ms step_avg:39.15ms
+[2025-09-05 16:18:59] [Rank 0] step:6181/10000 train_time:241852ms step_avg:39.13ms
+[2025-09-05 16:19:00] [Rank 0] step:6201/10000 train_time:242512ms step_avg:39.11ms
+[2025-09-05 16:19:00] [Rank 0] step:6221/10000 train_time:243171ms step_avg:39.09ms
+[2025-09-05 16:19:01] [Rank 0] step:6241/10000 train_time:243830ms step_avg:39.07ms
+[2025-09-05 16:19:02] [Rank 0] step:6261/10000 train_time:244489ms step_avg:39.05ms
+[2025-09-05 16:19:02] [Rank 0] step:6281/10000 train_time:245149ms step_avg:39.03ms
+[2025-09-05 16:19:03] [Rank 0] step:6301/10000 train_time:245808ms step_avg:39.01ms
+[2025-09-05 16:19:04] [Rank 0] step:6321/10000 train_time:246467ms step_avg:38.99ms
+[2025-09-05 16:19:04] [Rank 0] step:6341/10000 train_time:247127ms step_avg:38.97ms
+[2025-09-05 16:19:05] [Rank 0] step:6361/10000 train_time:247786ms step_avg:38.95ms
+[2025-09-05 16:19:06] [Rank 0] step:6381/10000 train_time:248445ms step_avg:38.94ms
+[2025-09-05 16:19:06] [Rank 0] step:6401/10000 train_time:249104ms step_avg:38.92ms
+[2025-09-05 16:19:07] [Rank 0] step:6421/10000 train_time:249764ms step_avg:38.90ms
+[2025-09-05 16:19:08] [Rank 0] step:6441/10000 train_time:250423ms step_avg:38.88ms
+[2025-09-05 16:19:08] [Rank 0] step:6461/10000 train_time:251082ms step_avg:38.86ms
+[2025-09-05 16:19:09] [Rank 0] step:6481/10000 train_time:251742ms step_avg:38.84ms
+[2025-09-05 16:19:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:19:10] [Rank 0] PRINT: step:6500/10000 train_loss:0.6896 val_loss:0.6798 train_time:252635ms step_avg:38.87ms
+[2025-09-05 16:19:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:19:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:20:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:20:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:20:32] [Rank 0] Total Loss: 5.0832
+[2025-09-05 16:20:32] [Rank 0] Total FTA (Unweighted): 0.8425
+[2025-09-05 16:20:32] [Rank 0] Total FTA (Weighted): 0.8425
+[2025-09-05 16:20:32] [Rank 0] Group 0 Loss: 5.1543
+[2025-09-05 16:20:32] [Rank 0] Group 1 Loss: 4.5891
+[2025-09-05 16:20:32] [Rank 0] Group 2 Loss: 4.6163
+[2025-09-05 16:20:32] [Rank 0] Group 3 Loss: 4.9420
+[2025-09-05 16:20:32] [Rank 0] Group 4 Loss: 4.9805
+[2025-09-05 16:20:32] [Rank 0] Group 5 Loss: 5.1041
+[2025-09-05 16:20:32] [Rank 0] Group 6 Loss: 4.9593
+[2025-09-05 16:20:32] [Rank 0] Group 7 Loss: 5.0373
+[2025-09-05 16:20:32] [Rank 0] Group 8 Loss: 5.1382
+[2025-09-05 16:20:32] [Rank 0] Group 9 Loss: 5.1422
+[2025-09-05 16:20:32] [Rank 0] Group 10 Loss: 5.2336
+[2025-09-05 16:20:32] [Rank 0] Group 11 Loss: 5.2558
+[2025-09-05 16:20:32] [Rank 0] Group 12 Loss: 5.2030
+[2025-09-05 16:20:32] [Rank 0] Group 13 Loss: 5.2938
+[2025-09-05 16:20:32] [Rank 0] Group 14 Loss: 5.2987
+[2025-09-05 16:20:32] [Rank 0] Group 15 Loss: 5.3831
+[2025-09-05 16:20:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 12 FTA: 0.9200
+[2025-09-05 16:20:32] [Rank 0] Group 13 FTA: 0.3300
+[2025-09-05 16:20:32] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 16:20:32] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:20:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:20:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:20:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:20:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:20:34] [Rank 0] step:6501/10000 train_time:252644ms step_avg:38.86ms
+[2025-09-05 16:20:34] [Rank 0] step:6521/10000 train_time:253089ms step_avg:38.81ms
+[2025-09-05 16:20:35] [Rank 0] step:6541/10000 train_time:253747ms step_avg:38.79ms
+[2025-09-05 16:20:36] [Rank 0] step:6561/10000 train_time:254404ms step_avg:38.78ms
+[2025-09-05 16:20:36] [Rank 0] step:6581/10000 train_time:255064ms step_avg:38.76ms
+[2025-09-05 16:20:37] [Rank 0] step:6601/10000 train_time:255723ms step_avg:38.74ms
+[2025-09-05 16:20:38] [Rank 0] step:6621/10000 train_time:256381ms step_avg:38.72ms
+[2025-09-05 16:20:38] [Rank 0] step:6641/10000 train_time:257039ms step_avg:38.70ms
+[2025-09-05 16:20:39] [Rank 0] step:6661/10000 train_time:257699ms step_avg:38.69ms
+[2025-09-05 16:20:40] [Rank 0] step:6681/10000 train_time:258358ms step_avg:38.67ms
+[2025-09-05 16:20:40] [Rank 0] step:6701/10000 train_time:259018ms step_avg:38.65ms
+[2025-09-05 16:20:41] [Rank 0] step:6721/10000 train_time:259676ms step_avg:38.64ms
+[2025-09-05 16:20:42] [Rank 0] step:6741/10000 train_time:260335ms step_avg:38.62ms
+[2025-09-05 16:20:42] [Rank 0] step:6761/10000 train_time:260993ms step_avg:38.60ms
+[2025-09-05 16:20:43] [Rank 0] step:6781/10000 train_time:261653ms step_avg:38.59ms
+[2025-09-05 16:20:44] [Rank 0] step:6801/10000 train_time:262312ms step_avg:38.57ms
+[2025-09-05 16:20:44] [Rank 0] step:6821/10000 train_time:262970ms step_avg:38.55ms
+[2025-09-05 16:20:45] [Rank 0] step:6841/10000 train_time:263832ms step_avg:38.57ms
+[2025-09-05 16:20:46] [Rank 0] step:6861/10000 train_time:264619ms step_avg:38.57ms
+[2025-09-05 16:20:47] [Rank 0] step:6881/10000 train_time:265278ms step_avg:38.55ms
+[2025-09-05 16:20:47] [Rank 0] step:6901/10000 train_time:265937ms step_avg:38.54ms
+[2025-09-05 16:20:48] [Rank 0] step:6921/10000 train_time:266596ms step_avg:38.52ms
+[2025-09-05 16:20:49] [Rank 0] step:6941/10000 train_time:267475ms step_avg:38.54ms
+[2025-09-05 16:20:50] [Rank 0] step:6961/10000 train_time:268134ms step_avg:38.52ms
+[2025-09-05 16:20:50] [Rank 0] step:6981/10000 train_time:268794ms step_avg:38.50ms
+[2025-09-05 16:20:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:20:51] [Rank 0] PRINT: step:7000/10000 train_loss:0.6829 val_loss:0.6736 train_time:269684ms step_avg:38.53ms
+[2025-09-05 16:20:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:20:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:22:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:22:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:22:13] [Rank 0] Total Loss: 5.1146
+[2025-09-05 16:22:13] [Rank 0] Total FTA (Unweighted): 0.8556
+[2025-09-05 16:22:13] [Rank 0] Total FTA (Weighted): 0.8556
+[2025-09-05 16:22:13] [Rank 0] Group 0 Loss: 5.3230
+[2025-09-05 16:22:13] [Rank 0] Group 1 Loss: 4.6468
+[2025-09-05 16:22:13] [Rank 0] Group 2 Loss: 4.6449
+[2025-09-05 16:22:13] [Rank 0] Group 3 Loss: 5.0302
+[2025-09-05 16:22:13] [Rank 0] Group 4 Loss: 4.9485
+[2025-09-05 16:22:13] [Rank 0] Group 5 Loss: 5.1305
+[2025-09-05 16:22:13] [Rank 0] Group 6 Loss: 5.0150
+[2025-09-05 16:22:13] [Rank 0] Group 7 Loss: 5.0977
+[2025-09-05 16:22:13] [Rank 0] Group 8 Loss: 5.1449
+[2025-09-05 16:22:13] [Rank 0] Group 9 Loss: 5.1953
+[2025-09-05 16:22:13] [Rank 0] Group 10 Loss: 5.2756
+[2025-09-05 16:22:13] [Rank 0] Group 11 Loss: 5.2905
+[2025-09-05 16:22:13] [Rank 0] Group 12 Loss: 5.2266
+[2025-09-05 16:22:13] [Rank 0] Group 13 Loss: 5.2965
+[2025-09-05 16:22:13] [Rank 0] Group 14 Loss: 5.2533
+[2025-09-05 16:22:13] [Rank 0] Group 15 Loss: 5.3139
+[2025-09-05 16:22:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-05 16:22:13] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 16:22:13] [Rank 0] Group 12 FTA: 0.9600
+[2025-09-05 16:22:13] [Rank 0] Group 13 FTA: 0.4000
+[2025-09-05 16:22:13] [Rank 0] Group 14 FTA: 0.2200
+[2025-09-05 16:22:13] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 16:22:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:22:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:22:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:22:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:22:14] [Rank 0] step:7001/10000 train_time:269694ms step_avg:38.52ms
+[2025-09-05 16:22:15] [Rank 0] step:7021/10000 train_time:270127ms step_avg:38.47ms
+[2025-09-05 16:22:16] [Rank 0] step:7041/10000 train_time:270786ms step_avg:38.46ms
+[2025-09-05 16:22:16] [Rank 0] step:7061/10000 train_time:271445ms step_avg:38.44ms
+[2025-09-05 16:22:17] [Rank 0] step:7081/10000 train_time:272104ms step_avg:38.43ms
+[2025-09-05 16:22:18] [Rank 0] step:7101/10000 train_time:272865ms step_avg:38.43ms
+[2025-09-05 16:22:18] [Rank 0] step:7121/10000 train_time:273524ms step_avg:38.41ms
+[2025-09-05 16:22:19] [Rank 0] step:7141/10000 train_time:274183ms step_avg:38.40ms
+[2025-09-05 16:22:20] [Rank 0] step:7161/10000 train_time:274842ms step_avg:38.38ms
+[2025-09-05 16:22:20] [Rank 0] step:7181/10000 train_time:275501ms step_avg:38.37ms
+[2025-09-05 16:22:21] [Rank 0] step:7201/10000 train_time:276161ms step_avg:38.35ms
+[2025-09-05 16:22:22] [Rank 0] step:7221/10000 train_time:276820ms step_avg:38.34ms
+[2025-09-05 16:22:22] [Rank 0] step:7241/10000 train_time:277480ms step_avg:38.32ms
+[2025-09-05 16:22:23] [Rank 0] step:7261/10000 train_time:278139ms step_avg:38.31ms
+[2025-09-05 16:22:24] [Rank 0] step:7281/10000 train_time:278797ms step_avg:38.29ms
+[2025-09-05 16:22:24] [Rank 0] step:7301/10000 train_time:279457ms step_avg:38.28ms
+[2025-09-05 16:22:25] [Rank 0] step:7321/10000 train_time:280116ms step_avg:38.26ms
+[2025-09-05 16:22:26] [Rank 0] step:7341/10000 train_time:280775ms step_avg:38.25ms
+[2025-09-05 16:22:26] [Rank 0] step:7361/10000 train_time:281434ms step_avg:38.23ms
+[2025-09-05 16:22:27] [Rank 0] step:7381/10000 train_time:282094ms step_avg:38.22ms
+[2025-09-05 16:22:27] [Rank 0] step:7401/10000 train_time:282753ms step_avg:38.20ms
+[2025-09-05 16:22:28] [Rank 0] step:7421/10000 train_time:283412ms step_avg:38.19ms
+[2025-09-05 16:22:29] [Rank 0] step:7441/10000 train_time:284071ms step_avg:38.18ms
+[2025-09-05 16:22:29] [Rank 0] step:7461/10000 train_time:284731ms step_avg:38.16ms
+[2025-09-05 16:22:30] [Rank 0] step:7481/10000 train_time:285390ms step_avg:38.15ms
+[2025-09-05 16:22:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:22:31] [Rank 0] PRINT: step:7500/10000 train_loss:0.6768 val_loss:0.6679 train_time:286283ms step_avg:38.17ms
+[2025-09-05 16:22:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:22:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:23:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:23:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:23:53] [Rank 0] Total Loss: 5.2070
+[2025-09-05 16:23:53] [Rank 0] Total FTA (Unweighted): 0.8531
+[2025-09-05 16:23:53] [Rank 0] Total FTA (Weighted): 0.8531
+[2025-09-05 16:23:53] [Rank 0] Group 0 Loss: 5.4068
+[2025-09-05 16:23:53] [Rank 0] Group 1 Loss: 4.6977
+[2025-09-05 16:23:53] [Rank 0] Group 2 Loss: 4.6817
+[2025-09-05 16:23:53] [Rank 0] Group 3 Loss: 5.0874
+[2025-09-05 16:23:53] [Rank 0] Group 4 Loss: 5.1073
+[2025-09-05 16:23:53] [Rank 0] Group 5 Loss: 5.1547
+[2025-09-05 16:23:53] [Rank 0] Group 6 Loss: 5.0874
+[2025-09-05 16:23:53] [Rank 0] Group 7 Loss: 5.1850
+[2025-09-05 16:23:53] [Rank 0] Group 8 Loss: 5.2850
+[2025-09-05 16:23:53] [Rank 0] Group 9 Loss: 5.3019
+[2025-09-05 16:23:53] [Rank 0] Group 10 Loss: 5.3897
+[2025-09-05 16:23:53] [Rank 0] Group 11 Loss: 5.3667
+[2025-09-05 16:23:53] [Rank 0] Group 12 Loss: 5.3597
+[2025-09-05 16:23:53] [Rank 0] Group 13 Loss: 5.4192
+[2025-09-05 16:23:53] [Rank 0] Group 14 Loss: 5.3803
+[2025-09-05 16:23:53] [Rank 0] Group 15 Loss: 5.4016
+[2025-09-05 16:23:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 16:23:53] [Rank 0] Group 12 FTA: 0.9700
+[2025-09-05 16:23:53] [Rank 0] Group 13 FTA: 0.4100
+[2025-09-05 16:23:53] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 16:23:53] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 16:23:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:23:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:23:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:23:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:23:54] [Rank 0] step:7501/10000 train_time:286292ms step_avg:38.17ms
+[2025-09-05 16:23:55] [Rank 0] step:7521/10000 train_time:286730ms step_avg:38.12ms
+[2025-09-05 16:23:56] [Rank 0] step:7541/10000 train_time:287544ms step_avg:38.13ms
+[2025-09-05 16:23:57] [Rank 0] step:7561/10000 train_time:288311ms step_avg:38.13ms
+[2025-09-05 16:23:57] [Rank 0] step:7581/10000 train_time:288971ms step_avg:38.12ms
+[2025-09-05 16:23:58] [Rank 0] step:7601/10000 train_time:289629ms step_avg:38.10ms
+[2025-09-05 16:23:59] [Rank 0] step:7621/10000 train_time:290491ms step_avg:38.12ms
+[2025-09-05 16:24:00] [Rank 0] step:7641/10000 train_time:291184ms step_avg:38.11ms
+[2025-09-05 16:24:01] [Rank 0] step:7661/10000 train_time:292290ms step_avg:38.15ms
+[2025-09-05 16:24:01] [Rank 0] step:7681/10000 train_time:292950ms step_avg:38.14ms
+[2025-09-05 16:24:02] [Rank 0] step:7701/10000 train_time:293610ms step_avg:38.13ms
+[2025-09-05 16:24:03] [Rank 0] step:7721/10000 train_time:294272ms step_avg:38.11ms
+[2025-09-05 16:24:03] [Rank 0] step:7741/10000 train_time:295032ms step_avg:38.11ms
+[2025-09-05 16:24:04] [Rank 0] step:7761/10000 train_time:295692ms step_avg:38.10ms
+[2025-09-05 16:24:05] [Rank 0] step:7781/10000 train_time:296351ms step_avg:38.09ms
+[2025-09-05 16:24:05] [Rank 0] step:7801/10000 train_time:297009ms step_avg:38.07ms
+[2025-09-05 16:24:06] [Rank 0] step:7821/10000 train_time:297668ms step_avg:38.06ms
+[2025-09-05 16:24:07] [Rank 0] step:7841/10000 train_time:298326ms step_avg:38.05ms
+[2025-09-05 16:24:07] [Rank 0] step:7861/10000 train_time:298985ms step_avg:38.03ms
+[2025-09-05 16:24:08] [Rank 0] step:7881/10000 train_time:299644ms step_avg:38.02ms
+[2025-09-05 16:24:09] [Rank 0] step:7901/10000 train_time:300302ms step_avg:38.01ms
+[2025-09-05 16:24:09] [Rank 0] step:7921/10000 train_time:300961ms step_avg:38.00ms
+[2025-09-05 16:24:10] [Rank 0] step:7941/10000 train_time:301620ms step_avg:37.98ms
+[2025-09-05 16:24:11] [Rank 0] step:7961/10000 train_time:302380ms step_avg:37.98ms
+[2025-09-05 16:24:11] [Rank 0] step:7981/10000 train_time:303038ms step_avg:37.97ms
+[2025-09-05 16:24:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:24:12] [Rank 0] PRINT: step:8000/10000 train_loss:0.6712 val_loss:0.6622 train_time:303930ms step_avg:37.99ms
+[2025-09-05 16:24:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:24:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:25:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:25:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:25:34] [Rank 0] Total Loss: 5.1558
+[2025-09-05 16:25:34] [Rank 0] Total FTA (Unweighted): 0.8650
+[2025-09-05 16:25:34] [Rank 0] Total FTA (Weighted): 0.8650
+[2025-09-05 16:25:34] [Rank 0] Group 0 Loss: 5.4127
+[2025-09-05 16:25:34] [Rank 0] Group 1 Loss: 4.6257
+[2025-09-05 16:25:34] [Rank 0] Group 2 Loss: 4.7643
+[2025-09-05 16:25:34] [Rank 0] Group 3 Loss: 5.0219
+[2025-09-05 16:25:34] [Rank 0] Group 4 Loss: 5.0125
+[2025-09-05 16:25:34] [Rank 0] Group 5 Loss: 5.1864
+[2025-09-05 16:25:34] [Rank 0] Group 6 Loss: 5.0506
+[2025-09-05 16:25:34] [Rank 0] Group 7 Loss: 5.1446
+[2025-09-05 16:25:34] [Rank 0] Group 8 Loss: 5.1663
+[2025-09-05 16:25:34] [Rank 0] Group 9 Loss: 5.2764
+[2025-09-05 16:25:34] [Rank 0] Group 10 Loss: 5.3224
+[2025-09-05 16:25:34] [Rank 0] Group 11 Loss: 5.2988
+[2025-09-05 16:25:34] [Rank 0] Group 12 Loss: 5.2669
+[2025-09-05 16:25:34] [Rank 0] Group 13 Loss: 5.3381
+[2025-09-05 16:25:34] [Rank 0] Group 14 Loss: 5.2685
+[2025-09-05 16:25:34] [Rank 0] Group 15 Loss: 5.3377
+[2025-09-05 16:25:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 16:25:34] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 16:25:34] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-05 16:25:34] [Rank 0] Group 13 FTA: 0.4900
+[2025-09-05 16:25:34] [Rank 0] Group 14 FTA: 0.2400
+[2025-09-05 16:25:34] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-05 16:25:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:25:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:25:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:25:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:25:35] [Rank 0] step:8001/10000 train_time:303939ms step_avg:37.99ms
+[2025-09-05 16:25:37] [Rank 0] step:8021/10000 train_time:304376ms step_avg:37.95ms
+[2025-09-05 16:25:37] [Rank 0] step:8041/10000 train_time:305506ms step_avg:37.99ms
+[2025-09-05 16:25:38] [Rank 0] step:8061/10000 train_time:306165ms step_avg:37.98ms
+[2025-09-05 16:25:39] [Rank 0] step:8081/10000 train_time:306823ms step_avg:37.97ms
+[2025-09-05 16:25:39] [Rank 0] step:8101/10000 train_time:307483ms step_avg:37.96ms
+[2025-09-05 16:25:40] [Rank 0] step:8121/10000 train_time:308142ms step_avg:37.94ms
+[2025-09-05 16:25:40] [Rank 0] step:8141/10000 train_time:308808ms step_avg:37.93ms
+[2025-09-05 16:25:41] [Rank 0] step:8161/10000 train_time:309469ms step_avg:37.92ms
+[2025-09-05 16:25:42] [Rank 0] step:8181/10000 train_time:310126ms step_avg:37.91ms
+[2025-09-05 16:25:42] [Rank 0] step:8201/10000 train_time:310784ms step_avg:37.90ms
+[2025-09-05 16:25:43] [Rank 0] step:8221/10000 train_time:311443ms step_avg:37.88ms
+[2025-09-05 16:25:44] [Rank 0] step:8241/10000 train_time:312102ms step_avg:37.87ms
+[2025-09-05 16:25:44] [Rank 0] step:8261/10000 train_time:312762ms step_avg:37.86ms
+[2025-09-05 16:25:45] [Rank 0] step:8281/10000 train_time:313419ms step_avg:37.85ms
+[2025-09-05 16:25:46] [Rank 0] step:8301/10000 train_time:314177ms step_avg:37.85ms
+[2025-09-05 16:25:47] [Rank 0] step:8321/10000 train_time:314836ms step_avg:37.84ms
+[2025-09-05 16:25:47] [Rank 0] step:8341/10000 train_time:315500ms step_avg:37.83ms
+[2025-09-05 16:25:48] [Rank 0] step:8361/10000 train_time:316160ms step_avg:37.81ms
+[2025-09-05 16:25:49] [Rank 0] step:8381/10000 train_time:316820ms step_avg:37.80ms
+[2025-09-05 16:25:49] [Rank 0] step:8401/10000 train_time:317479ms step_avg:37.79ms
+[2025-09-05 16:25:50] [Rank 0] step:8421/10000 train_time:318138ms step_avg:37.78ms
+[2025-09-05 16:25:50] [Rank 0] step:8441/10000 train_time:318798ms step_avg:37.77ms
+[2025-09-05 16:25:51] [Rank 0] step:8461/10000 train_time:319458ms step_avg:37.76ms
+[2025-09-05 16:25:52] [Rank 0] step:8481/10000 train_time:320117ms step_avg:37.75ms
+[2025-09-05 16:25:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:25:53] [Rank 0] PRINT: step:8500/10000 train_loss:0.6658 val_loss:0.6565 train_time:321010ms step_avg:37.77ms
+[2025-09-05 16:25:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:25:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:27:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:27:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:27:14] [Rank 0] Total Loss: 5.2038
+[2025-09-05 16:27:14] [Rank 0] Total FTA (Unweighted): 0.8856
+[2025-09-05 16:27:14] [Rank 0] Total FTA (Weighted): 0.8856
+[2025-09-05 16:27:14] [Rank 0] Group 0 Loss: 5.4243
+[2025-09-05 16:27:14] [Rank 0] Group 1 Loss: 4.6873
+[2025-09-05 16:27:14] [Rank 0] Group 2 Loss: 4.7291
+[2025-09-05 16:27:14] [Rank 0] Group 3 Loss: 5.0301
+[2025-09-05 16:27:14] [Rank 0] Group 4 Loss: 5.0839
+[2025-09-05 16:27:14] [Rank 0] Group 5 Loss: 5.2090
+[2025-09-05 16:27:14] [Rank 0] Group 6 Loss: 5.0931
+[2025-09-05 16:27:14] [Rank 0] Group 7 Loss: 5.1608
+[2025-09-05 16:27:14] [Rank 0] Group 8 Loss: 5.2609
+[2025-09-05 16:27:14] [Rank 0] Group 9 Loss: 5.3126
+[2025-09-05 16:27:14] [Rank 0] Group 10 Loss: 5.3636
+[2025-09-05 16:27:14] [Rank 0] Group 11 Loss: 5.3698
+[2025-09-05 16:27:14] [Rank 0] Group 12 Loss: 5.3741
+[2025-09-05 16:27:14] [Rank 0] Group 13 Loss: 5.4188
+[2025-09-05 16:27:14] [Rank 0] Group 14 Loss: 5.3591
+[2025-09-05 16:27:14] [Rank 0] Group 15 Loss: 5.3851
+[2025-09-05 16:27:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 16:27:14] [Rank 0] Group 13 FTA: 0.7100
+[2025-09-05 16:27:14] [Rank 0] Group 14 FTA: 0.3100
+[2025-09-05 16:27:14] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 16:27:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:27:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:27:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:27:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:27:17] [Rank 0] step:8501/10000 train_time:321021ms step_avg:37.76ms
+[2025-09-05 16:27:17] [Rank 0] step:8521/10000 train_time:321455ms step_avg:37.73ms
+[2025-09-05 16:27:18] [Rank 0] step:8541/10000 train_time:322114ms step_avg:37.71ms
+[2025-09-05 16:27:19] [Rank 0] step:8561/10000 train_time:322773ms step_avg:37.70ms
+[2025-09-05 16:27:19] [Rank 0] step:8581/10000 train_time:323432ms step_avg:37.69ms
+[2025-09-05 16:27:20] [Rank 0] step:8601/10000 train_time:324092ms step_avg:37.68ms
+[2025-09-05 16:27:21] [Rank 0] step:8621/10000 train_time:324751ms step_avg:37.67ms
+[2025-09-05 16:27:21] [Rank 0] step:8641/10000 train_time:325514ms step_avg:37.67ms
+[2025-09-05 16:27:22] [Rank 0] step:8661/10000 train_time:326172ms step_avg:37.66ms
+[2025-09-05 16:27:23] [Rank 0] step:8681/10000 train_time:326831ms step_avg:37.65ms
+[2025-09-05 16:27:23] [Rank 0] step:8701/10000 train_time:327490ms step_avg:37.64ms
+[2025-09-05 16:27:24] [Rank 0] step:8721/10000 train_time:328149ms step_avg:37.63ms
+[2025-09-05 16:27:25] [Rank 0] step:8741/10000 train_time:328809ms step_avg:37.62ms
+[2025-09-05 16:27:25] [Rank 0] step:8761/10000 train_time:329468ms step_avg:37.61ms
+[2025-09-05 16:27:26] [Rank 0] step:8781/10000 train_time:330228ms step_avg:37.61ms
+[2025-09-05 16:27:27] [Rank 0] step:8801/10000 train_time:330887ms step_avg:37.60ms
+[2025-09-05 16:27:27] [Rank 0] step:8821/10000 train_time:331546ms step_avg:37.59ms
+[2025-09-05 16:27:28] [Rank 0] step:8841/10000 train_time:332205ms step_avg:37.58ms
+[2025-09-05 16:27:29] [Rank 0] step:8861/10000 train_time:332864ms step_avg:37.57ms
+[2025-09-05 16:27:29] [Rank 0] step:8881/10000 train_time:333524ms step_avg:37.55ms
+[2025-09-05 16:27:30] [Rank 0] step:8901/10000 train_time:334183ms step_avg:37.54ms
+[2025-09-05 16:27:31] [Rank 0] step:8921/10000 train_time:334842ms step_avg:37.53ms
+[2025-09-05 16:27:31] [Rank 0] step:8941/10000 train_time:335500ms step_avg:37.52ms
+[2025-09-05 16:27:32] [Rank 0] step:8961/10000 train_time:336159ms step_avg:37.51ms
+[2025-09-05 16:27:33] [Rank 0] step:8981/10000 train_time:336819ms step_avg:37.50ms
+[2025-09-05 16:27:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:27:34] [Rank 0] PRINT: step:9000/10000 train_loss:0.6608 val_loss:0.6517 train_time:337711ms step_avg:37.52ms
+[2025-09-05 16:27:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:27:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:28:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:28:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:28:55] [Rank 0] Total Loss: 5.1507
+[2025-09-05 16:28:55] [Rank 0] Total FTA (Unweighted): 0.8881
+[2025-09-05 16:28:55] [Rank 0] Total FTA (Weighted): 0.8881
+[2025-09-05 16:28:55] [Rank 0] Group 0 Loss: 5.3617
+[2025-09-05 16:28:55] [Rank 0] Group 1 Loss: 4.6029
+[2025-09-05 16:28:55] [Rank 0] Group 2 Loss: 4.6138
+[2025-09-05 16:28:55] [Rank 0] Group 3 Loss: 5.0527
+[2025-09-05 16:28:55] [Rank 0] Group 4 Loss: 5.0587
+[2025-09-05 16:28:55] [Rank 0] Group 5 Loss: 5.1342
+[2025-09-05 16:28:55] [Rank 0] Group 6 Loss: 5.0512
+[2025-09-05 16:28:55] [Rank 0] Group 7 Loss: 5.1074
+[2025-09-05 16:28:55] [Rank 0] Group 8 Loss: 5.2182
+[2025-09-05 16:28:55] [Rank 0] Group 9 Loss: 5.2655
+[2025-09-05 16:28:55] [Rank 0] Group 10 Loss: 5.3472
+[2025-09-05 16:28:55] [Rank 0] Group 11 Loss: 5.2902
+[2025-09-05 16:28:55] [Rank 0] Group 12 Loss: 5.3203
+[2025-09-05 16:28:55] [Rank 0] Group 13 Loss: 5.3494
+[2025-09-05 16:28:55] [Rank 0] Group 14 Loss: 5.3077
+[2025-09-05 16:28:55] [Rank 0] Group 15 Loss: 5.3292
+[2025-09-05 16:28:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 16:28:55] [Rank 0] Group 13 FTA: 0.7500
+[2025-09-05 16:28:55] [Rank 0] Group 14 FTA: 0.3100
+[2025-09-05 16:28:55] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 16:28:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:28:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:28:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:28:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:28:57] [Rank 0] step:9001/10000 train_time:337720ms step_avg:37.52ms
+[2025-09-05 16:28:57] [Rank 0] step:9021/10000 train_time:338162ms step_avg:37.49ms
+[2025-09-05 16:28:58] [Rank 0] step:9041/10000 train_time:338822ms step_avg:37.48ms
+[2025-09-05 16:28:59] [Rank 0] step:9061/10000 train_time:339482ms step_avg:37.47ms
+[2025-09-05 16:28:59] [Rank 0] step:9081/10000 train_time:340142ms step_avg:37.46ms
+[2025-09-05 16:29:00] [Rank 0] step:9101/10000 train_time:340802ms step_avg:37.45ms
+[2025-09-05 16:29:01] [Rank 0] step:9121/10000 train_time:341461ms step_avg:37.44ms
+[2025-09-05 16:29:01] [Rank 0] step:9141/10000 train_time:342121ms step_avg:37.43ms
+[2025-09-05 16:29:02] [Rank 0] step:9161/10000 train_time:342782ms step_avg:37.42ms
+[2025-09-05 16:29:03] [Rank 0] step:9181/10000 train_time:343443ms step_avg:37.41ms
+[2025-09-05 16:29:03] [Rank 0] step:9201/10000 train_time:344102ms step_avg:37.40ms
+[2025-09-05 16:29:04] [Rank 0] step:9221/10000 train_time:344761ms step_avg:37.39ms
+[2025-09-05 16:29:05] [Rank 0] step:9241/10000 train_time:345421ms step_avg:37.38ms
+[2025-09-05 16:29:05] [Rank 0] step:9261/10000 train_time:346080ms step_avg:37.37ms
+[2025-09-05 16:29:06] [Rank 0] step:9281/10000 train_time:346739ms step_avg:37.36ms
+[2025-09-05 16:29:07] [Rank 0] step:9301/10000 train_time:347398ms step_avg:37.35ms
+[2025-09-05 16:29:07] [Rank 0] step:9321/10000 train_time:348058ms step_avg:37.34ms
+[2025-09-05 16:29:08] [Rank 0] step:9341/10000 train_time:348717ms step_avg:37.33ms
+[2025-09-05 16:29:09] [Rank 0] step:9361/10000 train_time:349376ms step_avg:37.32ms
+[2025-09-05 16:29:09] [Rank 0] step:9381/10000 train_time:350035ms step_avg:37.31ms
+[2025-09-05 16:29:10] [Rank 0] step:9401/10000 train_time:350693ms step_avg:37.30ms
+[2025-09-05 16:29:11] [Rank 0] step:9421/10000 train_time:351352ms step_avg:37.29ms
+[2025-09-05 16:29:12] [Rank 0] step:9441/10000 train_time:352012ms step_avg:37.29ms
+[2025-09-05 16:29:12] [Rank 0] step:9461/10000 train_time:352885ms step_avg:37.30ms
+[2025-09-05 16:29:13] [Rank 0] step:9481/10000 train_time:353543ms step_avg:37.29ms
+[2025-09-05 16:29:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:29:14] [Rank 0] PRINT: step:9500/10000 train_loss:0.6559 val_loss:0.6476 train_time:354436ms step_avg:37.31ms
+[2025-09-05 16:29:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:29:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:30:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:30:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:30:35] [Rank 0] Total Loss: 5.1956
+[2025-09-05 16:30:35] [Rank 0] Total FTA (Unweighted): 0.8969
+[2025-09-05 16:30:35] [Rank 0] Total FTA (Weighted): 0.8969
+[2025-09-05 16:30:35] [Rank 0] Group 0 Loss: 5.5490
+[2025-09-05 16:30:35] [Rank 0] Group 1 Loss: 4.6736
+[2025-09-05 16:30:35] [Rank 0] Group 2 Loss: 4.7392
+[2025-09-05 16:30:35] [Rank 0] Group 3 Loss: 5.0808
+[2025-09-05 16:30:35] [Rank 0] Group 4 Loss: 5.0497
+[2025-09-05 16:30:35] [Rank 0] Group 5 Loss: 5.1756
+[2025-09-05 16:30:35] [Rank 0] Group 6 Loss: 5.0349
+[2025-09-05 16:30:35] [Rank 0] Group 7 Loss: 5.1779
+[2025-09-05 16:30:35] [Rank 0] Group 8 Loss: 5.2550
+[2025-09-05 16:30:35] [Rank 0] Group 9 Loss: 5.2981
+[2025-09-05 16:30:35] [Rank 0] Group 10 Loss: 5.3431
+[2025-09-05 16:30:35] [Rank 0] Group 11 Loss: 5.3193
+[2025-09-05 16:30:35] [Rank 0] Group 12 Loss: 5.3469
+[2025-09-05 16:30:35] [Rank 0] Group 13 Loss: 5.3941
+[2025-09-05 16:30:35] [Rank 0] Group 14 Loss: 5.3376
+[2025-09-05 16:30:35] [Rank 0] Group 15 Loss: 5.3548
+[2025-09-05 16:30:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:30:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 16:30:36] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-05 16:30:36] [Rank 0] Group 13 FTA: 0.7900
+[2025-09-05 16:30:36] [Rank 0] Group 14 FTA: 0.4000
+[2025-09-05 16:30:36] [Rank 0] Group 15 FTA: 0.1700
+[2025-09-05 16:30:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:30:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:30:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:30:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:30:37] [Rank 0] step:9501/10000 train_time:354444ms step_avg:37.31ms
+[2025-09-05 16:30:38] [Rank 0] step:9521/10000 train_time:354894ms step_avg:37.27ms
+[2025-09-05 16:30:38] [Rank 0] step:9541/10000 train_time:355555ms step_avg:37.27ms
+[2025-09-05 16:30:39] [Rank 0] step:9561/10000 train_time:356216ms step_avg:37.26ms
+[2025-09-05 16:30:40] [Rank 0] step:9581/10000 train_time:356877ms step_avg:37.25ms
+[2025-09-05 16:30:40] [Rank 0] step:9601/10000 train_time:357539ms step_avg:37.24ms
+[2025-09-05 16:30:41] [Rank 0] step:9621/10000 train_time:358200ms step_avg:37.23ms
+[2025-09-05 16:30:42] [Rank 0] step:9641/10000 train_time:358861ms step_avg:37.22ms
+[2025-09-05 16:30:43] [Rank 0] step:9661/10000 train_time:359802ms step_avg:37.24ms
+[2025-09-05 16:30:43] [Rank 0] step:9681/10000 train_time:360463ms step_avg:37.23ms
+[2025-09-05 16:30:44] [Rank 0] step:9701/10000 train_time:361124ms step_avg:37.23ms
+[2025-09-05 16:30:45] [Rank 0] step:9721/10000 train_time:361787ms step_avg:37.22ms
+[2025-09-05 16:30:45] [Rank 0] step:9741/10000 train_time:362445ms step_avg:37.21ms
+[2025-09-05 16:30:46] [Rank 0] step:9761/10000 train_time:363104ms step_avg:37.20ms
+[2025-09-05 16:30:47] [Rank 0] step:9781/10000 train_time:363765ms step_avg:37.19ms
+[2025-09-05 16:30:47] [Rank 0] step:9801/10000 train_time:364422ms step_avg:37.18ms
+[2025-09-05 16:30:48] [Rank 0] step:9821/10000 train_time:365080ms step_avg:37.17ms
+[2025-09-05 16:30:48] [Rank 0] step:9841/10000 train_time:365739ms step_avg:37.16ms
+[2025-09-05 16:30:49] [Rank 0] step:9861/10000 train_time:366398ms step_avg:37.16ms
+[2025-09-05 16:30:50] [Rank 0] step:9881/10000 train_time:367057ms step_avg:37.15ms
+[2025-09-05 16:30:51] [Rank 0] step:9901/10000 train_time:367816ms step_avg:37.15ms
+[2025-09-05 16:30:51] [Rank 0] step:9921/10000 train_time:368475ms step_avg:37.14ms
+[2025-09-05 16:30:52] [Rank 0] step:9941/10000 train_time:369236ms step_avg:37.14ms
+[2025-09-05 16:30:53] [Rank 0] step:9961/10000 train_time:369896ms step_avg:37.13ms
+[2025-09-05 16:30:53] [Rank 0] step:9981/10000 train_time:370553ms step_avg:37.13ms
+[2025-09-05 16:30:54] [Rank 0] step:10000/10000 train_time:371180ms step_avg:37.12ms
+[2025-09-05 16:30:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:30:54] [Rank 0] PRINT: step:10000/10000 train_loss:0.6512 val_loss:0.6435 train_time:371453ms step_avg:37.15ms
+[2025-09-05 16:30:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:30:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:32:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:32:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:32:16] [Rank 0] Total Loss: 5.2104
+[2025-09-05 16:32:16] [Rank 0] Total FTA (Unweighted): 0.9087
+[2025-09-05 16:32:16] [Rank 0] Total FTA (Weighted): 0.9087
+[2025-09-05 16:32:16] [Rank 0] Group 0 Loss: 5.5209
+[2025-09-05 16:32:16] [Rank 0] Group 1 Loss: 4.6957
+[2025-09-05 16:32:16] [Rank 0] Group 2 Loss: 4.7213
+[2025-09-05 16:32:16] [Rank 0] Group 3 Loss: 5.0648
+[2025-09-05 16:32:16] [Rank 0] Group 4 Loss: 5.0646
+[2025-09-05 16:32:16] [Rank 0] Group 5 Loss: 5.2274
+[2025-09-05 16:32:16] [Rank 0] Group 6 Loss: 5.0837
+[2025-09-05 16:32:16] [Rank 0] Group 7 Loss: 5.1617
+[2025-09-05 16:32:16] [Rank 0] Group 8 Loss: 5.2716
+[2025-09-05 16:32:16] [Rank 0] Group 9 Loss: 5.3499
+[2025-09-05 16:32:16] [Rank 0] Group 10 Loss: 5.3749
+[2025-09-05 16:32:16] [Rank 0] Group 11 Loss: 5.3357
+[2025-09-05 16:32:16] [Rank 0] Group 12 Loss: 5.3550
+[2025-09-05 16:32:16] [Rank 0] Group 13 Loss: 5.4170
+[2025-09-05 16:32:16] [Rank 0] Group 14 Loss: 5.3494
+[2025-09-05 16:32:16] [Rank 0] Group 15 Loss: 5.3735
+[2025-09-05 16:32:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 16:32:16] [Rank 0] Group 13 FTA: 0.8500
+[2025-09-05 16:32:16] [Rank 0] Group 14 FTA: 0.4900
+[2025-09-05 16:32:16] [Rank 0] Group 15 FTA: 0.2000
+[2025-09-05 16:32:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-09-05 16:32:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-09-05 16:32:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_loss_curve.png
+[2025-09-05 16:32:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_42/total_acc_curve.png
+[2025-09-05 16:32:18] [Rank 0] step:10001/10000 train_time:371461ms step_avg:37.14ms
+[2025-09-05 16:32:18] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 16:32:18 2025 ---
+[2025-09-05 16:32:18] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..6f169e1f8f63a67077bac69d32f6aa89db94a2c1
--- /dev/null
+++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/config.json
b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.002, + "base_dir": "logs_qa_adam_gated/lr_search_long", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "0f989c76-70b4-442b-8b13-59aa3b7a588f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 
121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 
1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 
850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 
756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..0cccbff93947de1515e9d0017cd06b9cd593ccee --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5e8f196693dcff8f4a9ee79da2a52a79e7105989e7407686590c8934c34d898 +size 402337 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..cee604502ecc249237967e39dfa59056b7c94501 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8bce71d2fe4268f61de14815ca0a9b5cbba945fc2aa613e3e87605974de41ed +size 456307 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..cbaef6b601f9dc20073338919fc287c298405a3b --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:614716f3614e3fe94c3094be374073480f656cb3bfb4d16edc9cc158f2aa3a20 +size 102354 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..7764b0e9821ef3306d1481150843b1b8b52df5d8 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cb9377dc1ba35d87e66322be42152e94c671e44222c98efb24159f66a8296c1 +size 107627 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/training_log_0f989c76-70b4-442b-8b13-59aa3b7a588f.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/training_log_0f989c76-70b4-442b-8b13-59aa3b7a588f.txt new file mode 100644 index 0000000000000000000000000000000000000000..44a97aceebd711bbf7a3976202f36cb3626cfde2 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/training_log_0f989c76-70b4-442b-8b13-59aa3b7a588f.txt @@ -0,0 +1,5614 @@ +[2025-09-05 18:54:44] 
[Rank 0] PRINT: --- Script Start: Fri Sep 5 18:54:44 2025 ---
+[2025-09-05 18:54:44] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.002, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 18:54:44] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 18:54:44] [Rank 0] PRINT: Using fixed seed: 43
+[2025-09-05 18:54:44] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43
+[2025-09-05 18:54:44] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+ code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ if torch.cuda.is_available():
+ torch.cuda.manual_seed_all(seed)
+ print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+ header = torch.from_file(str(file), False, 256, dtype=torch.int32) +
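# Shard layout, as the asserts below verify: a 256-entry int32 header whose
+ # first three fields are a magic number (20240520), a format version (1),
+ # and the token count, followed by the tokens themselves as uint16 (so the
+ # payload is exactly 2 * num_tokens bytes). A compatible shard could be
+ # written with, e.g. (hypothetical writer sketch; np as imported above):
+ #   hdr = np.zeros(256, np.int32); hdr[:3] = (20240520, 1, len(toks))
+ #   out.write(hdr.tobytes()); out.write(np.asarray(toks, np.uint16).tobytes()) +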
assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+ assert header[1] == 1, "unsupported version"
+ num_tokens = int(header[2])
+ with file.open("rb", buffering=0) as f:
+ tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+ f.seek(256 * 4)
+ nbytes = f.readinto(tokens.numpy())
+ assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+ return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+ files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+ assert batch_size % world_size == 0
+ local_batch_size = batch_size // world_size
+ file_iter = cycle(files) # cycling over the shards enables multi-epoch training
+ tokens, pos = _load_data_shard(next(file_iter)), 0
+ while True:
+ if pos + batch_size + 1 >= len(tokens):
+ tokens, pos = _load_data_shard(next(file_iter)), 0
+ buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+ inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+ targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+ pos += batch_size
+ yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+ help="Defines how Muon is applied. "
+ "0: Muon(All Hidden Attn+MLP - original); "
+ "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+ "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+ "3: Muon(All Attn)/Adam(MLP); "
+ "4: Muon(MLP)/Adam(All Attn); "
+ "5: All Adam (No Muon, all applicable matrices to Adam). "
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP). "
+ "7: Muon(VO Attn, MLP)/Adam(QK Attn). "
+ "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
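+ # NOTE: the help string above is incomplete; the dispatch below also accepts
+ # mode 9 (pure SGD+momentum), mode 10 (Muon on W_O Attn + MLP), and modes
+ # 13/14/15/16 (Muon on various W_O / W_V / W_2 subsets) -- see the
+ # optimizer-setup section for the authoritative mapping.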
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+ dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+# run_id = uuid.uuid4()
+# os.makedirs(log_dir, exist_ok=True) # Create new log directory
+# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+# print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+ # Set seed again specifically for master process for operations like dir creation, config saving
+ set_seed(exp_args.seed)
+
+ # Construct folder name based on config and seed
+ run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+ # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+ # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+ # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+ run_dir_path = base_log_dir / run_folder_name
+ run_dir_path.mkdir(parents=True, exist_ok=True)
+ run_dir_path_str = str(run_dir_path)
+
+ run_uuid = uuid.uuid4()
+ logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+ print(f"Logging to: {logfile}")
+
+ # Save configuration
+ config_to_save = {
+ "cli_args": vars(exp_args),
+ "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+ "run_uuid_for_log": str(run_uuid),
+ "script_code_logged_at_start": True
+ }
+ config_file_path = run_dir_path / "config.json"
+ with open(config_file_path, "w") as f:
+ json.dump(config_to_save, f, indent=4)
+ print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+ if master_process:
+ # Add timestamp and rank for better log readability
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+ log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+ # Print to console if requested or if it's a specific "PRINT:" message
+ if console or s.startswith("PRINT:"):
+ actual_s = s[6:] if s.startswith("PRINT:") else s
+ print(actual_s) # Print to stdout for master process
+
+ if logfile: # append exactly once per call
+ with open(logfile, "a") as f:
+ f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+ print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
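+ # Reading guide for the loop below: class_to_group_map is built from
+ # generate_powerlaw_selection_counts above, where group 0 holds 1 class
+ # with 2^m samples and each group g >= 1 holds 2^(g-1) classes with
+ # 2^(m-g) samples apiece; e.g. m=3 yields selection_counts
+ # {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1} and class_groups
+ # [0, 1, 2, 2, 3, 3, 3, 3]. Each sample is padded to a multiple of
+ # BLOCK_SIZE=128 (the FlexAttention block size), capped at 4096 tokens;
+ # pad positions get target -100 so cross_entropy ignores them, and FTA
+ # (first-token accuracy) checks whether the argmax logit at the last
+ # prompt position equals the first token of " " + answer.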
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
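+ # Note on the two totals computed below: total_acc_weighted pools
+ # correct/total over all evaluated samples, while total_acc_unweighted
+ # averages the per-group accuracies. With this fixed eval set
+ # (per_group_k = 100 samples per group) the group sizes are equal, so the
+ # two coincide, which is why the log above reports identical Weighted and
+ # Unweighted FTA.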
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_qk_group
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_attn_matrices
+ adam_matrix_target_list = all_mlp_matrices
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_mlp_matrices
+ adam_matrix_target_list = all_attn_matrices
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = []
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = mlp_w2_group
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
+ adam_matrix_target_list = attn_qk_group
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
+ elif current_optimizer_mode == 9: # sgd + momentum
+ # This mode uses SGD with momentum for all parameters, no Muon or Adam
+ print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+ all_params = list(model.parameters())
+ sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+ optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+ optimizer2 = None
+ optimizers = [optimizer1]
+ print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + all_mlp_matrices
+ adam_matrix_target_list = attn_v_params + attn_qk_group
+ elif current_optimizer_mode == 13:
+ print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
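+ # Notation used in the mode labels: W_1 is the MLP input matrix (c_fc,
+ # plus c_up in the gated parameterization), W_2 is the MLP output matrix
+ # (c_proj), and W_O is the attention output projection (attn.c_proj);
+ # QK/VO denote the query/key and value/output projection pairs.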
+ elif current_optimizer_mode == 14:
+ print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params
+ adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+ elif current_optimizer_mode == 15:
+ print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params
+ adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+ elif current_optimizer_mode == 16:
+ print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params + attn_qk_group
+ adam_matrix_target_list = attn_o_params + all_mlp_matrices
+ else:
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+ # Skip Adam and Muon setup for SGD mode (9)
+ if current_optimizer_mode != 9:
+ # Adam optimizer setup
+ adam_param_groups_config = [
+ #dict(params=head_params, lr=0.22),
+ #dict(params=embed_params, lr=0.6),
+ #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+ dict(params=head_params, lr=exp_args.adam_lr),
+ dict(params=embed_params, lr=exp_args.adam_lr),
+ dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+ ]
+ # Add matrices specifically assigned to Adam for this experiment mode
+ if adam_matrix_target_list:
+ # Ensure adam_matrix_target_list is flat and contains Parameters
+ flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+ if flat_adam_matrices: # Only add group if there are params
+ adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+ # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+ adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+ optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+ optimizers = [optimizer1] # Start with Adam
+
+ # Muon optimizer setup
+ if muon_params_target_list:
+ # Ensure muon_params_target_list is flat, unique, and contains Parameters
+ flat_unique_muon_params = []
+ seen_muon_ids = set()
+ for sublist_or_p in muon_params_target_list:
+ for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+ if p is not None and id(p) not in seen_muon_ids:
+ flat_unique_muon_params.append(p)
+ seen_muon_ids.add(id(p))
+
+ if flat_unique_muon_params: # Only create Muon if it has parameters
+ optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+ optimizers.append(optimizer2)
+ else:
+ print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+ optimizer2 = None # Explicitly set to None if not created
+ else:
+ print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+ optimizer2 = None # Explicitly set to None
+
+ print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on QK, V Attn + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam weight decay is desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g., a mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 18:54:44] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the logfile exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message 
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on QK, V Attn + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam weight decay is desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g., a mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
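Under the grouping above, each transformer block contributes one tensor per family, so the expected group sizes are easy to eyeball (an illustration assuming every block has both attn and mlp, not output from the run):

    # For a model with L blocks:
    #   attn_qk_group: 2*L tensors (q_w, k_w per block)
    #   attn_vo_group: 2*L tensors (v_w, attn.c_proj per block)
    #   mlp_w1_group:  2*L tensors (mlp.c_fc, mlp.c_up per block)
    #   mlp_w2_group:    L tensors (mlp.c_proj per block)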
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on V Attn, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP; Adam on the rest
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O; Adam on the rest
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V; Adam on the rest
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn; Adam on O Attn, MLP
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # optionally add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
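With this run's hyperparameters (num_iterations=10000, cooldown_frac=0.8), both schedules are easy to tabulate by hand. Worked values, computed from the two functions above rather than taken from the log:

    # get_lr: flat at 1.0 until step 2000 (x < 0.2), then linear down to 0.1:
    #   step  1000 -> x=0.1 -> 1.0
    #   step  6000 -> x=0.6 -> w=(1-0.6)/0.8=0.5 -> 0.5*1.0 + 0.5*0.1 = 0.55
    #   step 10000 -> x=1.0 -> w=0.0 -> 0.1
    # get_window_size_blocks: window grows roughly linearly from 128 to 1792 tokens:
    #   step     0 -> max(128, next_multiple_of_n(0,    n=128)) = 128  ->  1 block
    #   step  5000 -> max(128, next_multiple_of_n(864,  n=128)) = 896  ->  7 blocks
    #   step 10000 -> max(128, next_multiple_of_n(1728, n=128)) = 1792 -> 14 blocks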
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building the fixed eval set; ensure a parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
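The FTA filter above keys on a single pattern: a question ending in "?" followed by "Answer:". Illustrative strings (made up for this note, not drawn from qa_tail_m15.jsonl):

    # _is_valid_qa_text_for_fta("What city was X born in? Answer: Paris") -> True
    # _is_valid_qa_text_for_fta("X was born in Paris.")                   -> False (no "? ... Answer:")
    # _is_valid_qa_text_for_fta("Where is Y? answer : Tokyo")             -> True (IGNORECASE, loose spacing)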
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
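The divisibility warning fires in this run. Assuming world_size=4 (implied by the log's val_batch_size of 65536 together with the configured val_seq_len=16384), the arithmetic is:

    # val_batch_size = 4 * 16384 = 65536 tokens per validation step
    # val_num_steps  = 491520 // 65536 = 7
    # tokens evaluated = 7 * 65536 = 458752
    # tokens skipped   = 491520 - 458752 = 32768 (half a batch)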
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
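In the training loop above, Muon's momentum (when optimizer2 exists; it is None for this all-Adam mode-5 run, so the block is inert here) is warmed up linearly over the first 300 steps:

    # frac = min(step / 300, 1); momentum = (1 - frac) * 0.85 + frac * 0.95
    #   step    0 -> momentum 0.85
    #   step  150 -> momentum 0.90
    #   step 300+ -> momentum 0.95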
+[2025-09-05 18:54:44] [Rank 0] PRINT: Constructing model...
+[2025-09-05 18:54:46] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 18:54:46] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 18:54:46] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 18:54:50] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 18:54:50] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 18:54:50] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 18:54:50] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 18:54:50] [Rank 0] PRINT: Model returns:
+[2025-09-05 18:54:50] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 18:54:50] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 18:54:50] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002).
+[2025-09-05 18:54:50] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 18:54:50] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 18:54:50] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 18:54:55] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 18:54:55] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 18:55:34] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 18:55:34] [Rank 0] PRINT: Starting training...
+[2025-09-05 18:55:40] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/fixed_eval_indices.json
+[2025-09-05 18:55:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:55:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 18:56:18] [Rank 0] step:21/10000 train_time:32915ms step_avg:1567.37ms
+[2025-09-05 18:56:19] [Rank 0] step:41/10000 train_time:33568ms step_avg:818.73ms
+[2025-09-05 18:56:20] [Rank 0] step:61/10000 train_time:34220ms step_avg:560.99ms
+[2025-09-05 18:56:20] [Rank 0] step:81/10000 train_time:34874ms step_avg:430.54ms
+[2025-09-05 18:56:21] [Rank 0] step:101/10000 train_time:35527ms step_avg:351.75ms
+[2025-09-05 18:56:22] [Rank 0] step:121/10000 train_time:36180ms step_avg:299.01ms
+[2025-09-05 18:56:22] [Rank 0] step:141/10000 train_time:36833ms step_avg:261.23ms
+[2025-09-05 18:56:23] [Rank 0] step:161/10000 train_time:37484ms step_avg:232.82ms
+[2025-09-05 18:56:24] [Rank 0] step:181/10000 train_time:38136ms step_avg:210.70ms
+[2025-09-05 18:56:24] [Rank 0] step:201/10000 train_time:38788ms step_avg:192.97ms
+[2025-09-05 18:56:25] [Rank 0] step:221/10000 train_time:39440ms step_avg:178.46ms
+[2025-09-05 18:56:26] [Rank 0] step:241/10000 train_time:40095ms step_avg:166.37ms
+[2025-09-05 18:56:26] [Rank 0] step:261/10000 train_time:40747ms step_avg:156.12ms
+[2025-09-05 18:56:27] [Rank 0] step:281/10000 train_time:41399ms step_avg:147.33ms
+[2025-09-05 18:56:28] [Rank 0] step:301/10000 train_time:42052ms step_avg:139.71ms
+[2025-09-05 18:56:28] [Rank 0] step:321/10000 train_time:42705ms step_avg:133.04ms
+[2025-09-05 18:56:29] [Rank 0] step:341/10000 train_time:43359ms step_avg:127.15ms
+[2025-09-05 18:56:29] [Rank 0] step:361/10000 train_time:44012ms step_avg:121.92ms
+[2025-09-05 18:56:30] [Rank 0] step:381/10000 train_time:44666ms step_avg:117.23ms
+[2025-09-05 18:56:31] [Rank 0] step:401/10000 train_time:45319ms step_avg:113.01ms
+[2025-09-05 18:56:31] [Rank 0] step:421/10000 train_time:45972ms step_avg:109.20ms
+[2025-09-05 18:56:32] [Rank 0] step:441/10000 train_time:46627ms step_avg:105.73ms
+[2025-09-05 18:56:33] [Rank 0] step:461/10000 train_time:47278ms step_avg:102.56ms
+[2025-09-05 18:56:33] [Rank 0] step:481/10000 train_time:47931ms step_avg:99.65ms
+[2025-09-05 18:56:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:56:35] [Rank 0] PRINT: step:500/10000 train_loss:4.0924 val_loss:1.7130 train_time:48816ms step_avg:97.63ms
+[2025-09-05 18:56:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:56:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:57:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:57:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:57:57] [Rank 0] Total Loss: 4.2642
+[2025-09-05 18:57:57] [Rank 0] Total FTA (Unweighted): 0.2744
+[2025-09-05 18:57:57] [Rank 0] Total FTA (Weighted): 0.2744
+[2025-09-05 18:57:57] [Rank 0] Group 0 Loss: 3.2348
+[2025-09-05 18:57:57] [Rank 0] Group 1 Loss: 3.0521
+[2025-09-05 18:57:57] [Rank 0] Group 2 Loss: 3.1317
+[2025-09-05 18:57:57] [Rank 0] Group 3 Loss: 3.5385
+[2025-09-05 18:57:57] [Rank 0] Group 4 Loss: 3.8582
+[2025-09-05 18:57:57] [Rank 0] Group 5 Loss: 4.0755
+[2025-09-05 18:57:57] [Rank 0] Group 6 Loss: 4.2499
+[2025-09-05 18:57:57] [Rank 0] Group 7 Loss: 4.3835
+[2025-09-05 18:57:57] [Rank 0] Group 8 Loss: 4.6194
+[2025-09-05 18:57:57] [Rank 0] Group 9 Loss: 4.7448
+[2025-09-05 18:57:57] [Rank 0] Group 10 Loss: 4.8593
+[2025-09-05 18:57:57] [Rank 0] Group 11 Loss: 4.9586
+[2025-09-05 18:57:57] [Rank 0] Group 12 Loss: 4.8802
+[2025-09-05 18:57:57] [Rank 0] Group 13 Loss: 4.8967
+[2025-09-05 18:57:57] [Rank 0] Group 14 Loss: 4.9374
+[2025-09-05 18:57:57] [Rank 0] Group 15 Loss: 4.8066
+[2025-09-05 18:57:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:57:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:57:57] [Rank 0] Group 2 FTA: 0.6600
+[2025-09-05 18:57:57] [Rank 0] Group 3 FTA: 0.3600
+[2025-09-05 18:57:57] [Rank 0] Group 4 FTA: 0.2000
+[2025-09-05 18:57:57] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 18:57:57] [Rank 0] Group 6 FTA: 0.1600
+[2025-09-05 18:57:57] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-05 18:57:57] [Rank 0] Group 8 FTA: 0.0900
+[2025-09-05 18:57:57] [Rank 0] Group 9 FTA: 0.0600
+[2025-09-05 18:57:57] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 18:57:57] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 18:57:57] [Rank 0] Group 12 FTA: 0.0700
+[2025-09-05 18:57:57] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 18:57:57] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 18:57:57] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 18:57:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-09-05 18:57:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-09-05 18:57:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png
+[2025-09-05 18:57:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png
+[2025-09-05 18:57:58] [Rank 0] step:501/10000 train_time:48825ms step_avg:97.46ms
+[2025-09-05 18:57:59] [Rank 0] step:521/10000 train_time:49251ms step_avg:94.53ms
+[2025-09-05 18:58:00] [Rank 0] step:541/10000 train_time:49902ms step_avg:92.24ms
+[2025-09-05 18:58:00] [Rank 0] step:561/10000 train_time:50554ms step_avg:90.11ms
+[2025-09-05 18:58:01] [Rank 0] step:581/10000 train_time:51205ms step_avg:88.13ms
+[2025-09-05 18:58:02] [Rank 0] step:601/10000 train_time:51857ms step_avg:86.28ms
+[2025-09-05 18:58:02] [Rank 0] step:621/10000 train_time:52509ms step_avg:84.56ms
+[2025-09-05 18:58:03] [Rank 0] step:641/10000 train_time:53162ms step_avg:82.94ms
+[2025-09-05 18:58:04] [Rank 0] step:661/10000 train_time:53815ms step_avg:81.41ms
+[2025-09-05 18:58:04] [Rank 0] step:681/10000 train_time:54467ms step_avg:79.98ms
+[2025-09-05 18:58:05] [Rank 0] step:701/10000 train_time:55118ms step_avg:78.63ms
+[2025-09-05 18:58:05] [Rank 0] step:721/10000 train_time:55770ms step_avg:77.35ms
+[2025-09-05 18:58:06] [Rank 0] step:741/10000 train_time:56422ms step_avg:76.14ms
+[2025-09-05 18:58:07] [Rank 0] step:761/10000 train_time:57077ms step_avg:75.00ms
+[2025-09-05 18:58:07] [Rank 0] step:781/10000 train_time:57734ms step_avg:73.92ms
+[2025-09-05 18:58:08] [Rank 0] step:801/10000 train_time:58392ms step_avg:72.90ms
+[2025-09-05 18:58:09] [Rank 0] step:821/10000 train_time:59049ms step_avg:71.92ms
+[2025-09-05 18:58:10] [Rank 0] step:841/10000 train_time:60168ms step_avg:71.54ms
+[2025-09-05 18:58:11] [Rank 0] step:861/10000 train_time:60825ms step_avg:70.64ms
+[2025-09-05 18:58:11] [Rank 0] step:881/10000 train_time:61482ms step_avg:69.79ms
+[2025-09-05 18:58:12] [Rank 0] step:901/10000 train_time:62138ms step_avg:68.97ms
+[2025-09-05 18:58:12] [Rank 0] step:921/10000 train_time:62795ms step_avg:68.18ms
+[2025-09-05 18:58:13] [Rank 0] step:941/10000 train_time:63452ms step_avg:67.43ms
+[2025-09-05 18:58:14] [Rank 0] step:961/10000 train_time:64109ms step_avg:66.71ms
+[2025-09-05 18:58:14] [Rank 0] step:981/10000 train_time:64767ms step_avg:66.02ms
+[2025-09-05 18:58:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:58:16] [Rank 0] PRINT: step:1000/10000 train_loss:1.2815 val_loss:1.0506 train_time:65657ms step_avg:65.66ms
+[2025-09-05 18:58:16] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:58:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:59:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:59:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:59:37] [Rank 0] Total Loss: 4.4661
+[2025-09-05 18:59:37] [Rank 0] Total FTA (Unweighted): 0.5156
+[2025-09-05 18:59:37] [Rank 0] Total FTA (Weighted): 0.5156
+[2025-09-05 18:59:37] [Rank 0] Group 0 Loss: 4.2117
+[2025-09-05 18:59:37] [Rank 0] Group 1 Loss: 3.8009
+[2025-09-05 18:59:37] [Rank 0] Group 2 Loss: 3.7403
+[2025-09-05 18:59:37] [Rank 0] Group 3 Loss: 4.0963
+[2025-09-05 18:59:37] [Rank 0] Group 4 Loss: 4.0585
+[2025-09-05 18:59:37] [Rank 0] Group 5 Loss: 4.1088
+[2025-09-05 18:59:37] [Rank 0] Group 6 Loss: 4.1609
+[2025-09-05 18:59:37] [Rank 0] Group 7 Loss: 4.2757
+[2025-09-05 18:59:37] [Rank 0] Group 8 Loss: 4.4729
+[2025-09-05 18:59:37] [Rank 0] Group 9 Loss: 4.6018
+[2025-09-05 18:59:37] [Rank 0] Group 10 Loss: 4.7810
+[2025-09-05 18:59:37] [Rank 0] Group 11 Loss: 4.9106
+[2025-09-05 18:59:37] [Rank 0] Group 12 Loss: 5.0189
+[2025-09-05 18:59:37] [Rank 0] Group 13 Loss: 5.1191
+[2025-09-05 18:59:37] [Rank 0] Group 14 Loss: 5.0330
+[2025-09-05 18:59:37] [Rank 0] Group 15 Loss: 5.0669
+[2025-09-05 18:59:37] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:59:37] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:59:37] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:59:37] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:59:37] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:59:37] [Rank 0] Group 5 FTA: 0.8900
+[2025-09-05 18:59:37] [Rank 0] Group 6 FTA: 0.5800
+[2025-09-05 18:59:37] [Rank 0] Group 7 FTA: 0.5100
+[2025-09-05 18:59:37] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 18:59:37] [Rank 0] Group 9 FTA: 0.1900
+[2025-09-05 18:59:37] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-05 18:59:37] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 18:59:37] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 18:59:37] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 18:59:37] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 18:59:37] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 18:59:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-09-05 18:59:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-09-05 18:59:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png
+[2025-09-05 18:59:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png
+[2025-09-05 18:59:39] [Rank 0] step:1001/10000 train_time:65665ms step_avg:65.60ms
+[2025-09-05 18:59:39] [Rank 0] step:1021/10000 train_time:66096ms step_avg:64.74ms
+[2025-09-05 18:59:40] [Rank 0] step:1041/10000 train_time:66754ms step_avg:64.12ms
+[2025-09-05 18:59:41] [Rank 0] step:1061/10000 train_time:67411ms step_avg:63.54ms
+[2025-09-05 18:59:41] [Rank 0] step:1081/10000 train_time:68069ms step_avg:62.97ms
+[2025-09-05 18:59:42] [Rank 0] step:1101/10000 train_time:68728ms step_avg:62.42ms
+[2025-09-05 18:59:43] [Rank 0] step:1121/10000 train_time:69386ms step_avg:61.90ms
+[2025-09-05 18:59:43] [Rank 0] step:1141/10000 train_time:70043ms step_avg:61.39ms
+[2025-09-05 18:59:44] [Rank 0] step:1161/10000 train_time:70702ms step_avg:60.90ms
+[2025-09-05 18:59:45] [Rank 0] step:1181/10000 train_time:71360ms step_avg:60.42ms
+[2025-09-05 18:59:45] [Rank 0] step:1201/10000 train_time:72018ms step_avg:59.96ms
+[2025-09-05 18:59:46] [Rank 0] step:1221/10000 train_time:72677ms step_avg:59.52ms
+[2025-09-05 18:59:47] [Rank 0] step:1241/10000 train_time:73335ms step_avg:59.09ms
+[2025-09-05 18:59:47] [Rank 0] step:1261/10000 train_time:73994ms step_avg:58.68ms
+[2025-09-05 18:59:48] [Rank 0] step:1281/10000 train_time:74652ms step_avg:58.28ms
+[2025-09-05 18:59:49] [Rank 0] step:1301/10000 train_time:75311ms step_avg:57.89ms
+[2025-09-05 18:59:49] [Rank 0] step:1321/10000 train_time:75968ms step_avg:57.51ms
+[2025-09-05 18:59:50] [Rank 0] step:1341/10000 train_time:76625ms step_avg:57.14ms
+[2025-09-05 18:59:51] [Rank 0] step:1361/10000 train_time:77283ms step_avg:56.78ms
+[2025-09-05 18:59:51] [Rank 0] step:1381/10000 train_time:77941ms step_avg:56.44ms
+[2025-09-05 18:59:52] [Rank 0] step:1401/10000 train_time:78599ms step_avg:56.10ms
+[2025-09-05 18:59:53] [Rank 0] step:1421/10000 train_time:79258ms step_avg:55.78ms
+[2025-09-05 18:59:53] [Rank 0] step:1441/10000 train_time:79916ms step_avg:55.46ms
+[2025-09-05 18:59:54] [Rank 0] step:1461/10000 train_time:80574ms step_avg:55.15ms
+[2025-09-05 18:59:55] [Rank 0] step:1481/10000 train_time:81233ms step_avg:54.85ms
+[2025-09-05 18:59:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:59:56] [Rank 0] PRINT: step:1500/10000 train_loss:0.9781 val_loss:0.9131 train_time:82313ms step_avg:54.88ms
+[2025-09-05 18:59:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:59:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:01:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:01:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:01:17] [Rank 0] Total Loss: 4.5196
+[2025-09-05 19:01:17] [Rank 0] Total FTA (Unweighted): 0.6106
+[2025-09-05 19:01:17] [Rank 0] Total FTA (Weighted): 0.6106
+[2025-09-05 19:01:17] [Rank 0] Group 0 Loss: 4.4055
+[2025-09-05 19:01:17] [Rank 0] Group 1 Loss: 3.8292
+[2025-09-05 19:01:17] [Rank 0] Group 2 Loss: 3.7727
+[2025-09-05 19:01:17] [Rank 0] Group 3 Loss: 4.1885
+[2025-09-05 19:01:17] [Rank 0] Group 4 Loss: 4.2090
+[2025-09-05 19:01:17] [Rank 0] Group 5 Loss: 4.3026
+[2025-09-05 19:01:17] [Rank 0] Group 6 Loss: 4.2551
+[2025-09-05 19:01:17] [Rank 0] Group 7 Loss: 4.3038
+[2025-09-05 19:01:17] [Rank 0] Group 8 Loss: 4.4428
+[2025-09-05 19:01:17] [Rank 0] Group 9 Loss: 4.4599
+[2025-09-05 19:01:17] [Rank 0] Group 10 Loss: 4.7240
+[2025-09-05 19:01:17] [Rank 0] Group 11 Loss: 4.8279
+[2025-09-05 19:01:17] [Rank 0] Group 12 Loss: 4.9977
+[2025-09-05 19:01:17] [Rank 0] Group 13 Loss: 5.1585
+[2025-09-05 19:01:17] [Rank 0] Group 14 Loss: 5.2245
+[2025-09-05 19:01:17] [Rank 0] Group 15 Loss: 5.2116
+[2025-09-05 19:01:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:01:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:01:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:01:17] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:01:17] [Rank 0] Group 4 FTA: 0.9900
+[2025-09-05 19:01:17] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:01:17] [Rank 0] Group 6 FTA: 0.9700
+[2025-09-05 19:01:17] [Rank 0] Group 7 FTA: 0.8200
+[2025-09-05 19:01:17] [Rank 0] Group 8 FTA: 0.7500
+[2025-09-05 19:01:17] [Rank 0] Group 9 FTA: 0.4700
+[2025-09-05 19:01:17] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 19:01:17] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 19:01:17] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 19:01:17] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 19:01:17] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 19:01:17] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:01:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-09-05 19:01:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-09-05 19:01:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png
+[2025-09-05 19:01:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png
+[2025-09-05 19:01:19] [Rank 0] step:1501/10000 train_time:82322ms step_avg:54.84ms
+[2025-09-05 19:01:19] [Rank 0] step:1521/10000 train_time:82761ms step_avg:54.41ms
+[2025-09-05 19:01:20] [Rank 0] step:1541/10000 train_time:83417ms step_avg:54.13ms
+[2025-09-05 19:01:21] [Rank 0] step:1561/10000 train_time:84074ms step_avg:53.86ms
+[2025-09-05 19:01:21] [Rank 0] step:1581/10000 train_time:84731ms step_avg:53.59ms
+[2025-09-05 19:01:22] [Rank 0] step:1601/10000 train_time:85387ms step_avg:53.33ms
+[2025-09-05 19:01:23] [Rank 0] step:1621/10000 train_time:86044ms step_avg:53.08ms
+[2025-09-05 19:01:23] [Rank 0] step:1641/10000 train_time:86700ms step_avg:52.83ms
+[2025-09-05 19:01:24] [Rank 0] step:1661/10000 train_time:87357ms step_avg:52.59ms
+[2025-09-05 19:01:25] [Rank 0] step:1681/10000 train_time:88015ms step_avg:52.36ms
+[2025-09-05 19:01:25] [Rank 0] step:1701/10000 train_time:88673ms step_avg:52.13ms
+[2025-09-05 19:01:26] [Rank 0] step:1721/10000 train_time:89329ms step_avg:51.91ms
+[2025-09-05 19:01:27] [Rank 0] step:1741/10000 train_time:89986ms step_avg:51.69ms
+[2025-09-05 19:01:27] [Rank 0] step:1761/10000 train_time:90643ms step_avg:51.47ms
+[2025-09-05 19:01:28] [Rank 0] step:1781/10000 train_time:91300ms step_avg:51.26ms
+[2025-09-05 19:01:28] [Rank 0] step:1801/10000 train_time:91958ms step_avg:51.06ms
+[2025-09-05 19:01:29] [Rank 0] step:1821/10000 train_time:92615ms step_avg:50.86ms
+[2025-09-05 19:01:30] [Rank 0] step:1841/10000 train_time:93272ms step_avg:50.66ms
+[2025-09-05 19:01:30] [Rank 0] step:1861/10000 train_time:93930ms step_avg:50.47ms
+[2025-09-05 19:01:31] [Rank 0] step:1881/10000 train_time:94590ms step_avg:50.29ms
+[2025-09-05 19:01:32] [Rank 0] step:1901/10000 train_time:95248ms step_avg:50.10ms
+[2025-09-05 19:01:32] [Rank 0] step:1921/10000 train_time:95905ms step_avg:49.92ms
+[2025-09-05 19:01:33] [Rank 0] step:1941/10000 train_time:96562ms step_avg:49.75ms
+[2025-09-05 19:01:34] [Rank 0] step:1961/10000 train_time:97220ms step_avg:49.58ms
+[2025-09-05 19:01:34] [Rank 0] 
step:1981/10000 train_time:97878ms step_avg:49.41ms +[2025-09-05 19:01:34] [Rank 0] step:1981/10000 train_time:97878ms step_avg:49.41ms +[2025-09-05 19:01:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:01:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:01:36] [Rank 0] PRINT: step:2000/10000 train_loss:0.8861 val_loss:0.8474 train_time:98769ms step_avg:49.38ms +[2025-09-05 19:01:36] [Rank 0] PRINT: step:2000/10000 train_loss:0.8861 val_loss:0.8474 train_time:98769ms step_avg:49.38ms +[2025-09-05 19:01:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:01:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:01:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:01:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:02:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:02:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:02:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:02:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:02:57] [Rank 0] Total Loss: 4.7107 +[2025-09-05 19:02:57] [Rank 0] Total Loss: 4.7107 +[2025-09-05 19:02:57] [Rank 0] Total FTA (Unweighted): 0.6600 +[2025-09-05 19:02:57] [Rank 0] Total FTA (Unweighted): 0.6600 +[2025-09-05 19:02:57] [Rank 0] Total FTA (Weighted): 0.6600 +[2025-09-05 19:02:57] [Rank 0] Total FTA (Weighted): 0.6600 +[2025-09-05 19:02:57] [Rank 0] Group 0 Loss: 4.5310 +[2025-09-05 19:02:57] [Rank 0] Group 0 Loss: 4.5310 +[2025-09-05 19:02:57] [Rank 0] Group 1 Loss: 3.9869 +[2025-09-05 19:02:57] [Rank 0] Group 1 Loss: 3.9869 +[2025-09-05 19:02:57] [Rank 0] Group 2 Loss: 4.0903 +[2025-09-05 19:02:57] [Rank 0] Group 2 Loss: 4.0903 +[2025-09-05 19:02:57] [Rank 0] Group 3 Loss: 4.5725 +[2025-09-05 19:02:57] [Rank 0] Group 3 Loss: 4.5725 +[2025-09-05 19:02:57] [Rank 0] Group 4 Loss: 4.4300 +[2025-09-05 19:02:57] [Rank 0] Group 4 Loss: 4.4300 +[2025-09-05 19:02:57] [Rank 0] Group 5 Loss: 4.4972 +[2025-09-05 19:02:57] [Rank 0] Group 5 Loss: 4.4972 +[2025-09-05 19:02:57] [Rank 0] Group 6 Loss: 4.5213 +[2025-09-05 19:02:57] [Rank 0] Group 6 Loss: 4.5213 +[2025-09-05 19:02:57] [Rank 0] Group 7 Loss: 4.5014 +[2025-09-05 19:02:57] [Rank 0] Group 7 Loss: 4.5014 +[2025-09-05 19:02:57] [Rank 0] Group 8 Loss: 4.6820 +[2025-09-05 19:02:57] [Rank 0] Group 8 Loss: 4.6820 +[2025-09-05 19:02:57] [Rank 0] Group 9 Loss: 4.6124 +[2025-09-05 19:02:57] [Rank 0] Group 9 Loss: 4.6124 +[2025-09-05 19:02:57] [Rank 0] Group 10 Loss: 4.8074 +[2025-09-05 19:02:57] [Rank 0] Group 10 Loss: 4.8074 +[2025-09-05 19:02:57] [Rank 0] Group 11 Loss: 5.0036 +[2025-09-05 19:02:57] [Rank 0] Group 11 Loss: 5.0036 +[2025-09-05 19:02:57] [Rank 0] Group 12 Loss: 5.1007 +[2025-09-05 19:02:57] [Rank 0] Group 12 Loss: 5.1007 +[2025-09-05 19:02:57] [Rank 0] Group 13 Loss: 5.3093 +[2025-09-05 19:02:57] [Rank 0] Group 13 Loss: 5.3093 +[2025-09-05 19:02:57] [Rank 0] Group 14 Loss: 5.3259 +[2025-09-05 19:02:57] [Rank 0] Group 14 Loss: 5.3259 +[2025-09-05 19:02:57] [Rank 0] Group 15 Loss: 5.3994 +[2025-09-05 19:02:57] [Rank 0] Group 15 Loss: 5.3994 +[2025-09-05 19:02:57] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] 
Group 1 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:02:57] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 19:02:57] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 19:02:57] [Rank 0] Group 8 FTA: 0.8600 +[2025-09-05 19:02:57] [Rank 0] Group 8 FTA: 0.8600 +[2025-09-05 19:02:57] [Rank 0] Group 9 FTA: 0.6900 +[2025-09-05 19:02:57] [Rank 0] Group 9 FTA: 0.6900 +[2025-09-05 19:02:57] [Rank 0] Group 10 FTA: 0.3300 +[2025-09-05 19:02:57] [Rank 0] Group 10 FTA: 0.3300 +[2025-09-05 19:02:57] [Rank 0] Group 11 FTA: 0.2000 +[2025-09-05 19:02:57] [Rank 0] Group 11 FTA: 0.2000 +[2025-09-05 19:02:57] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 19:02:57] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 19:02:58] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 19:02:58] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 19:02:58] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 19:02:58] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 19:02:58] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:02:58] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:02:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:02:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:02:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:02:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:02:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:02:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:02:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:02:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:02:59] [Rank 0] step:2001/10000 train_time:98778ms step_avg:49.36ms +[2025-09-05 19:02:59] [Rank 0] step:2001/10000 train_time:98778ms step_avg:49.36ms +[2025-09-05 19:03:00] [Rank 0] step:2021/10000 train_time:99428ms step_avg:49.20ms +[2025-09-05 19:03:00] [Rank 0] step:2021/10000 train_time:99428ms step_avg:49.20ms +[2025-09-05 19:03:00] [Rank 0] step:2041/10000 train_time:100085ms step_avg:49.04ms +[2025-09-05 19:03:00] [Rank 0] step:2041/10000 train_time:100085ms step_avg:49.04ms +[2025-09-05 19:03:01] [Rank 0] step:2061/10000 train_time:100743ms step_avg:48.88ms +[2025-09-05 19:03:01] [Rank 0] step:2061/10000 train_time:100743ms 
step_avg:48.88ms +[2025-09-05 19:03:02] [Rank 0] step:2081/10000 train_time:101403ms step_avg:48.73ms +[2025-09-05 19:03:02] [Rank 0] step:2081/10000 train_time:101403ms step_avg:48.73ms +[2025-09-05 19:03:02] [Rank 0] step:2101/10000 train_time:102062ms step_avg:48.58ms +[2025-09-05 19:03:02] [Rank 0] step:2101/10000 train_time:102062ms step_avg:48.58ms +[2025-09-05 19:03:03] [Rank 0] step:2121/10000 train_time:102721ms step_avg:48.43ms +[2025-09-05 19:03:03] [Rank 0] step:2121/10000 train_time:102721ms step_avg:48.43ms +[2025-09-05 19:03:04] [Rank 0] step:2141/10000 train_time:103380ms step_avg:48.29ms +[2025-09-05 19:03:04] [Rank 0] step:2141/10000 train_time:103380ms step_avg:48.29ms +[2025-09-05 19:03:04] [Rank 0] step:2161/10000 train_time:104037ms step_avg:48.14ms +[2025-09-05 19:03:04] [Rank 0] step:2161/10000 train_time:104037ms step_avg:48.14ms +[2025-09-05 19:03:05] [Rank 0] step:2181/10000 train_time:104839ms step_avg:48.07ms +[2025-09-05 19:03:05] [Rank 0] step:2181/10000 train_time:104839ms step_avg:48.07ms +[2025-09-05 19:03:06] [Rank 0] step:2201/10000 train_time:105496ms step_avg:47.93ms +[2025-09-05 19:03:06] [Rank 0] step:2201/10000 train_time:105496ms step_avg:47.93ms +[2025-09-05 19:03:07] [Rank 0] step:2221/10000 train_time:106157ms step_avg:47.80ms +[2025-09-05 19:03:07] [Rank 0] step:2221/10000 train_time:106157ms step_avg:47.80ms +[2025-09-05 19:03:07] [Rank 0] step:2241/10000 train_time:106819ms step_avg:47.67ms +[2025-09-05 19:03:07] [Rank 0] step:2241/10000 train_time:106819ms step_avg:47.67ms +[2025-09-05 19:03:08] [Rank 0] step:2261/10000 train_time:107630ms step_avg:47.60ms +[2025-09-05 19:03:08] [Rank 0] step:2261/10000 train_time:107630ms step_avg:47.60ms +[2025-09-05 19:03:09] [Rank 0] step:2281/10000 train_time:108294ms step_avg:47.48ms +[2025-09-05 19:03:09] [Rank 0] step:2281/10000 train_time:108294ms step_avg:47.48ms +[2025-09-05 19:03:09] [Rank 0] step:2301/10000 train_time:108959ms step_avg:47.35ms +[2025-09-05 19:03:09] [Rank 0] step:2301/10000 train_time:108959ms step_avg:47.35ms +[2025-09-05 19:03:10] [Rank 0] step:2321/10000 train_time:109624ms step_avg:47.23ms +[2025-09-05 19:03:10] [Rank 0] step:2321/10000 train_time:109624ms step_avg:47.23ms +[2025-09-05 19:03:11] [Rank 0] step:2341/10000 train_time:110288ms step_avg:47.11ms +[2025-09-05 19:03:11] [Rank 0] step:2341/10000 train_time:110288ms step_avg:47.11ms +[2025-09-05 19:03:11] [Rank 0] step:2361/10000 train_time:110952ms step_avg:46.99ms +[2025-09-05 19:03:11] [Rank 0] step:2361/10000 train_time:110952ms step_avg:46.99ms +[2025-09-05 19:03:12] [Rank 0] step:2381/10000 train_time:111617ms step_avg:46.88ms +[2025-09-05 19:03:12] [Rank 0] step:2381/10000 train_time:111617ms step_avg:46.88ms +[2025-09-05 19:03:13] [Rank 0] step:2401/10000 train_time:112281ms step_avg:46.76ms +[2025-09-05 19:03:13] [Rank 0] step:2401/10000 train_time:112281ms step_avg:46.76ms +[2025-09-05 19:03:13] [Rank 0] step:2421/10000 train_time:112945ms step_avg:46.65ms +[2025-09-05 19:03:13] [Rank 0] step:2421/10000 train_time:112945ms step_avg:46.65ms +[2025-09-05 19:03:14] [Rank 0] step:2441/10000 train_time:113610ms step_avg:46.54ms +[2025-09-05 19:03:14] [Rank 0] step:2441/10000 train_time:113610ms step_avg:46.54ms +[2025-09-05 19:03:15] [Rank 0] step:2461/10000 train_time:114274ms step_avg:46.43ms +[2025-09-05 19:03:15] [Rank 0] step:2461/10000 train_time:114274ms step_avg:46.43ms +[2025-09-05 19:03:15] [Rank 0] step:2481/10000 train_time:114940ms step_avg:46.33ms +[2025-09-05 19:03:15] [Rank 0] step:2481/10000 
train_time:114940ms step_avg:46.33ms +[2025-09-05 19:03:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:03:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:03:16] [Rank 0] PRINT: step:2500/10000 train_loss:0.8325 val_loss:0.7997 train_time:115841ms step_avg:46.34ms +[2025-09-05 19:03:16] [Rank 0] PRINT: step:2500/10000 train_loss:0.8325 val_loss:0.7997 train_time:115841ms step_avg:46.34ms +[2025-09-05 19:03:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:03:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:03:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:03:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:04:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:04:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:04:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:04:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:04:38] [Rank 0] Total Loss: 4.8616 +[2025-09-05 19:04:38] [Rank 0] Total Loss: 4.8616 +[2025-09-05 19:04:38] [Rank 0] Total FTA (Unweighted): 0.7012 +[2025-09-05 19:04:38] [Rank 0] Total FTA (Unweighted): 0.7012 +[2025-09-05 19:04:38] [Rank 0] Total FTA (Weighted): 0.7013 +[2025-09-05 19:04:38] [Rank 0] Total FTA (Weighted): 0.7013 +[2025-09-05 19:04:38] [Rank 0] Group 0 Loss: 4.8334 +[2025-09-05 19:04:38] [Rank 0] Group 0 Loss: 4.8334 +[2025-09-05 19:04:38] [Rank 0] Group 1 Loss: 4.3502 +[2025-09-05 19:04:38] [Rank 0] Group 1 Loss: 4.3502 +[2025-09-05 19:04:38] [Rank 0] Group 2 Loss: 4.2538 +[2025-09-05 19:04:38] [Rank 0] Group 2 Loss: 4.2538 +[2025-09-05 19:04:38] [Rank 0] Group 3 Loss: 4.7363 +[2025-09-05 19:04:38] [Rank 0] Group 3 Loss: 4.7363 +[2025-09-05 19:04:38] [Rank 0] Group 4 Loss: 4.5601 +[2025-09-05 19:04:38] [Rank 0] Group 4 Loss: 4.5601 +[2025-09-05 19:04:38] [Rank 0] Group 5 Loss: 4.7456 +[2025-09-05 19:04:38] [Rank 0] Group 5 Loss: 4.7456 +[2025-09-05 19:04:38] [Rank 0] Group 6 Loss: 4.6219 +[2025-09-05 19:04:38] [Rank 0] Group 6 Loss: 4.6219 +[2025-09-05 19:04:38] [Rank 0] Group 7 Loss: 4.6961 +[2025-09-05 19:04:38] [Rank 0] Group 7 Loss: 4.6961 +[2025-09-05 19:04:38] [Rank 0] Group 8 Loss: 4.8689 +[2025-09-05 19:04:38] [Rank 0] Group 8 Loss: 4.8689 +[2025-09-05 19:04:38] [Rank 0] Group 9 Loss: 4.7881 +[2025-09-05 19:04:38] [Rank 0] Group 9 Loss: 4.7881 +[2025-09-05 19:04:38] [Rank 0] Group 10 Loss: 4.9584 +[2025-09-05 19:04:38] [Rank 0] Group 10 Loss: 4.9584 +[2025-09-05 19:04:38] [Rank 0] Group 11 Loss: 5.0535 +[2025-09-05 19:04:38] [Rank 0] Group 11 Loss: 5.0535 +[2025-09-05 19:04:38] [Rank 0] Group 12 Loss: 5.1348 +[2025-09-05 19:04:38] [Rank 0] Group 12 Loss: 5.1348 +[2025-09-05 19:04:38] [Rank 0] Group 13 Loss: 5.2925 +[2025-09-05 19:04:38] [Rank 0] Group 13 Loss: 5.2925 +[2025-09-05 19:04:38] [Rank 0] Group 14 Loss: 5.3940 +[2025-09-05 19:04:38] [Rank 0] Group 14 Loss: 5.3940 +[2025-09-05 19:04:38] [Rank 0] Group 15 Loss: 5.4976 +[2025-09-05 19:04:38] [Rank 0] Group 15 Loss: 5.4976 +[2025-09-05 19:04:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:04:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:04:38] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:04:38] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:04:39] 
[Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:04:39] [Rank 0] Group 8 FTA: 0.9500 +[2025-09-05 19:04:39] [Rank 0] Group 8 FTA: 0.9500 +[2025-09-05 19:04:39] [Rank 0] Group 9 FTA: 0.8000 +[2025-09-05 19:04:39] [Rank 0] Group 9 FTA: 0.8000 +[2025-09-05 19:04:39] [Rank 0] Group 10 FTA: 0.6600 +[2025-09-05 19:04:39] [Rank 0] Group 10 FTA: 0.6600 +[2025-09-05 19:04:39] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 19:04:39] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 19:04:39] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 19:04:39] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 19:04:39] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 19:04:39] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 19:04:39] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 19:04:39] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 19:04:39] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:04:39] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:04:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:04:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:04:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:04:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:04:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:04:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:04:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:04:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:04:40] [Rank 0] step:2501/10000 train_time:115849ms step_avg:46.32ms +[2025-09-05 19:04:40] [Rank 0] step:2501/10000 train_time:115849ms step_avg:46.32ms +[2025-09-05 19:04:41] [Rank 0] step:2521/10000 train_time:116301ms step_avg:46.13ms +[2025-09-05 19:04:41] [Rank 0] step:2521/10000 train_time:116301ms step_avg:46.13ms +[2025-09-05 19:04:41] [Rank 0] step:2541/10000 train_time:116963ms step_avg:46.03ms +[2025-09-05 19:04:41] [Rank 0] step:2541/10000 train_time:116963ms step_avg:46.03ms +[2025-09-05 19:04:42] [Rank 0] step:2561/10000 train_time:117627ms step_avg:45.93ms +[2025-09-05 19:04:42] [Rank 0] step:2561/10000 train_time:117627ms step_avg:45.93ms +[2025-09-05 19:04:43] [Rank 0] step:2581/10000 train_time:118289ms step_avg:45.83ms 
+[2025-09-05 19:04:43] [Rank 0] step:2581/10000 train_time:118289ms step_avg:45.83ms +[2025-09-05 19:04:43] [Rank 0] step:2601/10000 train_time:118952ms step_avg:45.73ms +[2025-09-05 19:04:43] [Rank 0] step:2601/10000 train_time:118952ms step_avg:45.73ms +[2025-09-05 19:04:44] [Rank 0] step:2621/10000 train_time:119615ms step_avg:45.64ms +[2025-09-05 19:04:44] [Rank 0] step:2621/10000 train_time:119615ms step_avg:45.64ms +[2025-09-05 19:04:45] [Rank 0] step:2641/10000 train_time:120278ms step_avg:45.54ms +[2025-09-05 19:04:45] [Rank 0] step:2641/10000 train_time:120278ms step_avg:45.54ms +[2025-09-05 19:04:45] [Rank 0] step:2661/10000 train_time:120941ms step_avg:45.45ms +[2025-09-05 19:04:45] [Rank 0] step:2661/10000 train_time:120941ms step_avg:45.45ms +[2025-09-05 19:04:46] [Rank 0] step:2681/10000 train_time:121604ms step_avg:45.36ms +[2025-09-05 19:04:46] [Rank 0] step:2681/10000 train_time:121604ms step_avg:45.36ms +[2025-09-05 19:04:47] [Rank 0] step:2701/10000 train_time:122268ms step_avg:45.27ms +[2025-09-05 19:04:47] [Rank 0] step:2701/10000 train_time:122268ms step_avg:45.27ms +[2025-09-05 19:04:47] [Rank 0] step:2721/10000 train_time:122932ms step_avg:45.18ms +[2025-09-05 19:04:47] [Rank 0] step:2721/10000 train_time:122932ms step_avg:45.18ms +[2025-09-05 19:04:48] [Rank 0] step:2741/10000 train_time:123595ms step_avg:45.09ms +[2025-09-05 19:04:48] [Rank 0] step:2741/10000 train_time:123595ms step_avg:45.09ms +[2025-09-05 19:04:49] [Rank 0] step:2761/10000 train_time:124258ms step_avg:45.00ms +[2025-09-05 19:04:49] [Rank 0] step:2761/10000 train_time:124258ms step_avg:45.00ms +[2025-09-05 19:04:49] [Rank 0] step:2781/10000 train_time:124921ms step_avg:44.92ms +[2025-09-05 19:04:49] [Rank 0] step:2781/10000 train_time:124921ms step_avg:44.92ms +[2025-09-05 19:04:50] [Rank 0] step:2801/10000 train_time:125584ms step_avg:44.84ms +[2025-09-05 19:04:50] [Rank 0] step:2801/10000 train_time:125584ms step_avg:44.84ms +[2025-09-05 19:04:51] [Rank 0] step:2821/10000 train_time:126248ms step_avg:44.75ms +[2025-09-05 19:04:51] [Rank 0] step:2821/10000 train_time:126248ms step_avg:44.75ms +[2025-09-05 19:04:52] [Rank 0] step:2841/10000 train_time:127383ms step_avg:44.84ms +[2025-09-05 19:04:52] [Rank 0] step:2841/10000 train_time:127383ms step_avg:44.84ms +[2025-09-05 19:04:52] [Rank 0] step:2861/10000 train_time:128045ms step_avg:44.76ms +[2025-09-05 19:04:52] [Rank 0] step:2861/10000 train_time:128045ms step_avg:44.76ms +[2025-09-05 19:04:53] [Rank 0] step:2881/10000 train_time:128708ms step_avg:44.67ms +[2025-09-05 19:04:53] [Rank 0] step:2881/10000 train_time:128708ms step_avg:44.67ms +[2025-09-05 19:04:54] [Rank 0] step:2901/10000 train_time:129371ms step_avg:44.60ms +[2025-09-05 19:04:54] [Rank 0] step:2901/10000 train_time:129371ms step_avg:44.60ms +[2025-09-05 19:04:54] [Rank 0] step:2921/10000 train_time:130034ms step_avg:44.52ms +[2025-09-05 19:04:54] [Rank 0] step:2921/10000 train_time:130034ms step_avg:44.52ms +[2025-09-05 19:04:55] [Rank 0] step:2941/10000 train_time:130697ms step_avg:44.44ms +[2025-09-05 19:04:55] [Rank 0] step:2941/10000 train_time:130697ms step_avg:44.44ms +[2025-09-05 19:04:56] [Rank 0] step:2961/10000 train_time:131360ms step_avg:44.36ms +[2025-09-05 19:04:56] [Rank 0] step:2961/10000 train_time:131360ms step_avg:44.36ms +[2025-09-05 19:04:56] [Rank 0] step:2981/10000 train_time:132022ms step_avg:44.29ms +[2025-09-05 19:04:56] [Rank 0] step:2981/10000 train_time:132022ms step_avg:44.29ms +[2025-09-05 19:04:57] [Rank 0] PRINT: Warning: val_tokens (491520) 
not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:04:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:04:58] [Rank 0] PRINT: step:3000/10000 train_loss:0.7929 val_loss:0.7679 train_time:132921ms step_avg:44.31ms +[2025-09-05 19:04:58] [Rank 0] PRINT: step:3000/10000 train_loss:0.7929 val_loss:0.7679 train_time:132921ms step_avg:44.31ms +[2025-09-05 19:04:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:04:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:04:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:04:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:06:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:06:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:06:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:06:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:06:19] [Rank 0] Total Loss: 4.7935 +[2025-09-05 19:06:19] [Rank 0] Total Loss: 4.7935 +[2025-09-05 19:06:19] [Rank 0] Total FTA (Unweighted): 0.7412 +[2025-09-05 19:06:19] [Rank 0] Total FTA (Unweighted): 0.7412 +[2025-09-05 19:06:19] [Rank 0] Total FTA (Weighted): 0.7412 +[2025-09-05 19:06:19] [Rank 0] Total FTA (Weighted): 0.7412 +[2025-09-05 19:06:19] [Rank 0] Group 0 Loss: 4.8048 +[2025-09-05 19:06:19] [Rank 0] Group 0 Loss: 4.8048 +[2025-09-05 19:06:19] [Rank 0] Group 1 Loss: 4.4112 +[2025-09-05 19:06:19] [Rank 0] Group 1 Loss: 4.4112 +[2025-09-05 19:06:19] [Rank 0] Group 2 Loss: 4.2290 +[2025-09-05 19:06:19] [Rank 0] Group 2 Loss: 4.2290 +[2025-09-05 19:06:19] [Rank 0] Group 3 Loss: 4.7082 +[2025-09-05 19:06:19] [Rank 0] Group 3 Loss: 4.7082 +[2025-09-05 19:06:19] [Rank 0] Group 4 Loss: 4.5549 +[2025-09-05 19:06:19] [Rank 0] Group 4 Loss: 4.5549 +[2025-09-05 19:06:19] [Rank 0] Group 5 Loss: 4.6697 +[2025-09-05 19:06:19] [Rank 0] Group 5 Loss: 4.6697 +[2025-09-05 19:06:19] [Rank 0] Group 6 Loss: 4.5806 +[2025-09-05 19:06:19] [Rank 0] Group 6 Loss: 4.5806 +[2025-09-05 19:06:19] [Rank 0] Group 7 Loss: 4.6527 +[2025-09-05 19:06:19] [Rank 0] Group 7 Loss: 4.6527 +[2025-09-05 19:06:19] [Rank 0] Group 8 Loss: 4.8149 +[2025-09-05 19:06:19] [Rank 0] Group 8 Loss: 4.8149 +[2025-09-05 19:06:19] [Rank 0] Group 9 Loss: 4.7088 +[2025-09-05 19:06:19] [Rank 0] Group 9 Loss: 4.7088 +[2025-09-05 19:06:19] [Rank 0] Group 10 Loss: 4.8242 +[2025-09-05 19:06:19] [Rank 0] Group 10 Loss: 4.8242 +[2025-09-05 19:06:19] [Rank 0] Group 11 Loss: 4.9453 +[2025-09-05 19:06:19] [Rank 0] Group 11 Loss: 4.9453 +[2025-09-05 19:06:19] [Rank 0] Group 12 Loss: 5.0408 +[2025-09-05 19:06:19] [Rank 0] Group 12 Loss: 5.0408 +[2025-09-05 19:06:19] [Rank 0] Group 13 Loss: 5.1668 +[2025-09-05 19:06:19] [Rank 0] Group 13 Loss: 5.1668 +[2025-09-05 19:06:19] [Rank 0] Group 14 Loss: 5.2328 +[2025-09-05 19:06:19] [Rank 0] Group 14 Loss: 5.2328 +[2025-09-05 19:06:19] [Rank 0] Group 15 Loss: 5.3507 +[2025-09-05 19:06:19] [Rank 0] Group 15 Loss: 5.3507 +[2025-09-05 19:06:19] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 
0] Group 3 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:06:19] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:06:20] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:06:20] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:06:20] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 19:06:20] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 19:06:20] [Rank 0] Group 9 FTA: 0.9300 +[2025-09-05 19:06:20] [Rank 0] Group 9 FTA: 0.9300 +[2025-09-05 19:06:20] [Rank 0] Group 10 FTA: 0.8600 +[2025-09-05 19:06:20] [Rank 0] Group 10 FTA: 0.8600 +[2025-09-05 19:06:20] [Rank 0] Group 11 FTA: 0.5000 +[2025-09-05 19:06:20] [Rank 0] Group 11 FTA: 0.5000 +[2025-09-05 19:06:20] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 19:06:20] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 19:06:20] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 19:06:20] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 19:06:20] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-05 19:06:20] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-05 19:06:20] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:06:20] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:06:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:06:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:06:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:06:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:06:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:06:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:06:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:06:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:06:21] [Rank 0] step:3001/10000 train_time:132930ms step_avg:44.30ms +[2025-09-05 19:06:21] [Rank 0] step:3001/10000 train_time:132930ms step_avg:44.30ms +[2025-09-05 19:06:22] [Rank 0] step:3021/10000 train_time:133373ms step_avg:44.15ms +[2025-09-05 19:06:22] [Rank 0] step:3021/10000 train_time:133373ms step_avg:44.15ms +[2025-09-05 19:06:22] [Rank 0] step:3041/10000 train_time:134038ms step_avg:44.08ms +[2025-09-05 19:06:22] [Rank 0] step:3041/10000 train_time:134038ms step_avg:44.08ms +[2025-09-05 19:06:23] [Rank 0] step:3061/10000 train_time:134704ms step_avg:44.01ms +[2025-09-05 19:06:23] [Rank 0] step:3061/10000 train_time:134704ms step_avg:44.01ms +[2025-09-05 19:06:24] [Rank 0] step:3081/10000 train_time:135368ms step_avg:43.94ms +[2025-09-05 19:06:24] [Rank 0] step:3081/10000 train_time:135368ms step_avg:43.94ms +[2025-09-05 19:06:24] [Rank 
0] step:3101/10000 train_time:136032ms step_avg:43.87ms +[2025-09-05 19:06:24] [Rank 0] step:3101/10000 train_time:136032ms step_avg:43.87ms +[2025-09-05 19:06:25] [Rank 0] step:3121/10000 train_time:136696ms step_avg:43.80ms +[2025-09-05 19:06:25] [Rank 0] step:3121/10000 train_time:136696ms step_avg:43.80ms +[2025-09-05 19:06:26] [Rank 0] step:3141/10000 train_time:137361ms step_avg:43.73ms +[2025-09-05 19:06:26] [Rank 0] step:3141/10000 train_time:137361ms step_avg:43.73ms +[2025-09-05 19:06:26] [Rank 0] step:3161/10000 train_time:138025ms step_avg:43.67ms +[2025-09-05 19:06:26] [Rank 0] step:3161/10000 train_time:138025ms step_avg:43.67ms +[2025-09-05 19:06:27] [Rank 0] step:3181/10000 train_time:138690ms step_avg:43.60ms +[2025-09-05 19:06:27] [Rank 0] step:3181/10000 train_time:138690ms step_avg:43.60ms +[2025-09-05 19:06:28] [Rank 0] step:3201/10000 train_time:139355ms step_avg:43.53ms +[2025-09-05 19:06:28] [Rank 0] step:3201/10000 train_time:139355ms step_avg:43.53ms +[2025-09-05 19:06:28] [Rank 0] step:3221/10000 train_time:140019ms step_avg:43.47ms +[2025-09-05 19:06:28] [Rank 0] step:3221/10000 train_time:140019ms step_avg:43.47ms +[2025-09-05 19:06:29] [Rank 0] step:3241/10000 train_time:140684ms step_avg:43.41ms +[2025-09-05 19:06:29] [Rank 0] step:3241/10000 train_time:140684ms step_avg:43.41ms +[2025-09-05 19:06:30] [Rank 0] step:3261/10000 train_time:141348ms step_avg:43.35ms +[2025-09-05 19:06:30] [Rank 0] step:3261/10000 train_time:141348ms step_avg:43.35ms +[2025-09-05 19:06:30] [Rank 0] step:3281/10000 train_time:142013ms step_avg:43.28ms +[2025-09-05 19:06:30] [Rank 0] step:3281/10000 train_time:142013ms step_avg:43.28ms +[2025-09-05 19:06:31] [Rank 0] step:3301/10000 train_time:142676ms step_avg:43.22ms +[2025-09-05 19:06:31] [Rank 0] step:3301/10000 train_time:142676ms step_avg:43.22ms +[2025-09-05 19:06:32] [Rank 0] step:3321/10000 train_time:143341ms step_avg:43.16ms +[2025-09-05 19:06:32] [Rank 0] step:3321/10000 train_time:143341ms step_avg:43.16ms +[2025-09-05 19:06:32] [Rank 0] step:3341/10000 train_time:144005ms step_avg:43.10ms +[2025-09-05 19:06:32] [Rank 0] step:3341/10000 train_time:144005ms step_avg:43.10ms +[2025-09-05 19:06:33] [Rank 0] step:3361/10000 train_time:144668ms step_avg:43.04ms +[2025-09-05 19:06:33] [Rank 0] step:3361/10000 train_time:144668ms step_avg:43.04ms +[2025-09-05 19:06:34] [Rank 0] step:3381/10000 train_time:145333ms step_avg:42.99ms +[2025-09-05 19:06:34] [Rank 0] step:3381/10000 train_time:145333ms step_avg:42.99ms +[2025-09-05 19:06:34] [Rank 0] step:3401/10000 train_time:145997ms step_avg:42.93ms +[2025-09-05 19:06:34] [Rank 0] step:3401/10000 train_time:145997ms step_avg:42.93ms +[2025-09-05 19:06:35] [Rank 0] step:3421/10000 train_time:146662ms step_avg:42.87ms +[2025-09-05 19:06:35] [Rank 0] step:3421/10000 train_time:146662ms step_avg:42.87ms +[2025-09-05 19:06:36] [Rank 0] step:3441/10000 train_time:147328ms step_avg:42.82ms +[2025-09-05 19:06:36] [Rank 0] step:3441/10000 train_time:147328ms step_avg:42.82ms +[2025-09-05 19:06:36] [Rank 0] step:3461/10000 train_time:147992ms step_avg:42.76ms +[2025-09-05 19:06:36] [Rank 0] step:3461/10000 train_time:147992ms step_avg:42.76ms +[2025-09-05 19:06:37] [Rank 0] step:3481/10000 train_time:148657ms step_avg:42.71ms +[2025-09-05 19:06:37] [Rank 0] step:3481/10000 train_time:148657ms step_avg:42.71ms +[2025-09-05 19:06:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:06:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:06:38] [Rank 0] PRINT: step:3500/10000 train_loss:0.7634 val_loss:0.7426 train_time:149558ms step_avg:42.73ms +[2025-09-05 19:06:38] [Rank 0] PRINT: step:3500/10000 train_loss:0.7634 val_loss:0.7426 train_time:149558ms step_avg:42.73ms +[2025-09-05 19:06:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:06:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:06:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:06:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:07:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:07:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:07:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:07:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:07:59] [Rank 0] Total Loss: 4.9255 +[2025-09-05 19:07:59] [Rank 0] Total Loss: 4.9255 +[2025-09-05 19:07:59] [Rank 0] Total FTA (Unweighted): 0.7556 +[2025-09-05 19:07:59] [Rank 0] Total FTA (Unweighted): 0.7556 +[2025-09-05 19:07:59] [Rank 0] Total FTA (Weighted): 0.7556 +[2025-09-05 19:07:59] [Rank 0] Total FTA (Weighted): 0.7556 +[2025-09-05 19:07:59] [Rank 0] Group 0 Loss: 5.0162 +[2025-09-05 19:07:59] [Rank 0] Group 0 Loss: 5.0162 +[2025-09-05 19:07:59] [Rank 0] Group 1 Loss: 4.5648 +[2025-09-05 19:07:59] [Rank 0] Group 1 Loss: 4.5648 +[2025-09-05 19:07:59] [Rank 0] Group 2 Loss: 4.2782 +[2025-09-05 19:07:59] [Rank 0] Group 2 Loss: 4.2782 +[2025-09-05 19:07:59] [Rank 0] Group 3 Loss: 4.8379 +[2025-09-05 19:07:59] [Rank 0] Group 3 Loss: 4.8379 +[2025-09-05 19:07:59] [Rank 0] Group 4 Loss: 4.7053 +[2025-09-05 19:07:59] [Rank 0] Group 4 Loss: 4.7053 +[2025-09-05 19:07:59] [Rank 0] Group 5 Loss: 4.8583 +[2025-09-05 19:07:59] [Rank 0] Group 5 Loss: 4.8583 +[2025-09-05 19:07:59] [Rank 0] Group 6 Loss: 4.7560 +[2025-09-05 19:07:59] [Rank 0] Group 6 Loss: 4.7560 +[2025-09-05 19:07:59] [Rank 0] Group 7 Loss: 4.8249 +[2025-09-05 19:07:59] [Rank 0] Group 7 Loss: 4.8249 +[2025-09-05 19:07:59] [Rank 0] Group 8 Loss: 4.9890 +[2025-09-05 19:07:59] [Rank 0] Group 8 Loss: 4.9890 +[2025-09-05 19:07:59] [Rank 0] Group 9 Loss: 4.9155 +[2025-09-05 19:07:59] [Rank 0] Group 9 Loss: 4.9155 +[2025-09-05 19:07:59] [Rank 0] Group 10 Loss: 5.0212 +[2025-09-05 19:07:59] [Rank 0] Group 10 Loss: 5.0212 +[2025-09-05 19:08:00] [Rank 0] Group 11 Loss: 4.9858 +[2025-09-05 19:08:00] [Rank 0] Group 11 Loss: 4.9858 +[2025-09-05 19:08:00] [Rank 0] Group 12 Loss: 5.1313 +[2025-09-05 19:08:00] [Rank 0] Group 12 Loss: 5.1313 +[2025-09-05 19:08:00] [Rank 0] Group 13 Loss: 5.2131 +[2025-09-05 19:08:00] [Rank 0] Group 13 Loss: 5.2131 +[2025-09-05 19:08:00] [Rank 0] Group 14 Loss: 5.2747 +[2025-09-05 19:08:00] [Rank 0] Group 14 Loss: 5.2747 +[2025-09-05 19:08:00] [Rank 0] Group 15 Loss: 5.4364 +[2025-09-05 19:08:00] [Rank 0] Group 15 Loss: 5.4364 +[2025-09-05 19:08:00] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:08:00] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:08:00] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 19:08:00] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 19:08:00] [Rank 0] Group 9 FTA: 0.9400 +[2025-09-05 19:08:00] [Rank 0] Group 9 FTA: 0.9400 +[2025-09-05 19:08:00] [Rank 0] Group 10 FTA: 0.9400 +[2025-09-05 19:08:00] [Rank 0] Group 10 FTA: 0.9400 +[2025-09-05 19:08:00] [Rank 0] Group 11 FTA: 0.6700 +[2025-09-05 19:08:00] [Rank 0] Group 11 FTA: 0.6700 +[2025-09-05 19:08:00] [Rank 0] Group 12 FTA: 0.2700 +[2025-09-05 19:08:00] [Rank 0] Group 12 FTA: 0.2700 +[2025-09-05 19:08:00] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 19:08:00] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 19:08:00] [Rank 0] Group 14 FTA: 0.0800 +[2025-09-05 19:08:00] [Rank 0] Group 14 FTA: 0.0800 +[2025-09-05 19:08:00] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:08:00] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:08:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:08:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:08:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:08:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:08:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:08:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:08:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:08:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:08:01] [Rank 0] step:3501/10000 train_time:149566ms step_avg:42.72ms +[2025-09-05 19:08:01] [Rank 0] step:3501/10000 train_time:149566ms step_avg:42.72ms +[2025-09-05 19:08:02] [Rank 0] step:3521/10000 train_time:150015ms step_avg:42.61ms +[2025-09-05 19:08:02] [Rank 0] step:3521/10000 train_time:150015ms step_avg:42.61ms +[2025-09-05 19:08:02] [Rank 0] step:3541/10000 train_time:150680ms step_avg:42.55ms +[2025-09-05 19:08:02] [Rank 0] step:3541/10000 train_time:150680ms step_avg:42.55ms +[2025-09-05 19:08:03] [Rank 0] step:3561/10000 train_time:151344ms step_avg:42.50ms +[2025-09-05 19:08:03] [Rank 0] step:3561/10000 train_time:151344ms step_avg:42.50ms +[2025-09-05 19:08:04] [Rank 0] step:3581/10000 train_time:152008ms step_avg:42.45ms +[2025-09-05 19:08:04] [Rank 0] step:3581/10000 train_time:152008ms step_avg:42.45ms +[2025-09-05 19:08:04] [Rank 0] step:3601/10000 train_time:152672ms step_avg:42.40ms +[2025-09-05 
19:08:04] [Rank 0] step:3601/10000 train_time:152672ms step_avg:42.40ms +[2025-09-05 19:08:05] [Rank 0] step:3621/10000 train_time:153334ms step_avg:42.35ms +[2025-09-05 19:08:05] [Rank 0] step:3621/10000 train_time:153334ms step_avg:42.35ms +[2025-09-05 19:08:06] [Rank 0] step:3641/10000 train_time:153998ms step_avg:42.30ms +[2025-09-05 19:08:06] [Rank 0] step:3641/10000 train_time:153998ms step_avg:42.30ms +[2025-09-05 19:08:06] [Rank 0] step:3661/10000 train_time:154662ms step_avg:42.25ms +[2025-09-05 19:08:06] [Rank 0] step:3661/10000 train_time:154662ms step_avg:42.25ms +[2025-09-05 19:08:07] [Rank 0] step:3681/10000 train_time:155326ms step_avg:42.20ms +[2025-09-05 19:08:07] [Rank 0] step:3681/10000 train_time:155326ms step_avg:42.20ms +[2025-09-05 19:08:08] [Rank 0] step:3701/10000 train_time:155989ms step_avg:42.15ms +[2025-09-05 19:08:08] [Rank 0] step:3701/10000 train_time:155989ms step_avg:42.15ms +[2025-09-05 19:08:08] [Rank 0] step:3721/10000 train_time:156656ms step_avg:42.10ms +[2025-09-05 19:08:08] [Rank 0] step:3721/10000 train_time:156656ms step_avg:42.10ms +[2025-09-05 19:08:09] [Rank 0] step:3741/10000 train_time:157319ms step_avg:42.05ms +[2025-09-05 19:08:09] [Rank 0] step:3741/10000 train_time:157319ms step_avg:42.05ms +[2025-09-05 19:08:10] [Rank 0] step:3761/10000 train_time:157983ms step_avg:42.01ms +[2025-09-05 19:08:10] [Rank 0] step:3761/10000 train_time:157983ms step_avg:42.01ms +[2025-09-05 19:08:10] [Rank 0] step:3781/10000 train_time:158646ms step_avg:41.96ms +[2025-09-05 19:08:10] [Rank 0] step:3781/10000 train_time:158646ms step_avg:41.96ms +[2025-09-05 19:08:11] [Rank 0] step:3801/10000 train_time:159307ms step_avg:41.91ms +[2025-09-05 19:08:11] [Rank 0] step:3801/10000 train_time:159307ms step_avg:41.91ms +[2025-09-05 19:08:12] [Rank 0] step:3821/10000 train_time:159971ms step_avg:41.87ms +[2025-09-05 19:08:12] [Rank 0] step:3821/10000 train_time:159971ms step_avg:41.87ms +[2025-09-05 19:08:12] [Rank 0] step:3841/10000 train_time:160633ms step_avg:41.82ms +[2025-09-05 19:08:12] [Rank 0] step:3841/10000 train_time:160633ms step_avg:41.82ms +[2025-09-05 19:08:13] [Rank 0] step:3861/10000 train_time:161296ms step_avg:41.78ms +[2025-09-05 19:08:13] [Rank 0] step:3861/10000 train_time:161296ms step_avg:41.78ms +[2025-09-05 19:08:14] [Rank 0] step:3881/10000 train_time:161959ms step_avg:41.73ms +[2025-09-05 19:08:14] [Rank 0] step:3881/10000 train_time:161959ms step_avg:41.73ms +[2025-09-05 19:08:14] [Rank 0] step:3901/10000 train_time:162622ms step_avg:41.69ms +[2025-09-05 19:08:14] [Rank 0] step:3901/10000 train_time:162622ms step_avg:41.69ms +[2025-09-05 19:08:15] [Rank 0] step:3921/10000 train_time:163286ms step_avg:41.64ms +[2025-09-05 19:08:15] [Rank 0] step:3921/10000 train_time:163286ms step_avg:41.64ms +[2025-09-05 19:08:16] [Rank 0] step:3941/10000 train_time:163949ms step_avg:41.60ms +[2025-09-05 19:08:16] [Rank 0] step:3941/10000 train_time:163949ms step_avg:41.60ms +[2025-09-05 19:08:16] [Rank 0] step:3961/10000 train_time:164612ms step_avg:41.56ms +[2025-09-05 19:08:16] [Rank 0] step:3961/10000 train_time:164612ms step_avg:41.56ms +[2025-09-05 19:08:17] [Rank 0] step:3981/10000 train_time:165275ms step_avg:41.52ms +[2025-09-05 19:08:17] [Rank 0] step:3981/10000 train_time:165275ms step_avg:41.52ms +[2025-09-05 19:08:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:08:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:08:18] [Rank 0] PRINT: step:4000/10000 train_loss:0.7416 val_loss:0.7244 train_time:166174ms step_avg:41.54ms +[2025-09-05 19:08:18] [Rank 0] PRINT: step:4000/10000 train_loss:0.7416 val_loss:0.7244 train_time:166174ms step_avg:41.54ms +[2025-09-05 19:08:18] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:08:18] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:08:18] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:08:18] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:09:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:09:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:09:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:09:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:09:40] [Rank 0] Total Loss: 4.9151 +[2025-09-05 19:09:40] [Rank 0] Total Loss: 4.9151 +[2025-09-05 19:09:40] [Rank 0] Total FTA (Unweighted): 0.7819 +[2025-09-05 19:09:40] [Rank 0] Total FTA (Unweighted): 0.7819 +[2025-09-05 19:09:40] [Rank 0] Total FTA (Weighted): 0.7819 +[2025-09-05 19:09:40] [Rank 0] Total FTA (Weighted): 0.7819 +[2025-09-05 19:09:40] [Rank 0] Group 0 Loss: 5.0864 +[2025-09-05 19:09:40] [Rank 0] Group 0 Loss: 5.0864 +[2025-09-05 19:09:40] [Rank 0] Group 1 Loss: 4.5158 +[2025-09-05 19:09:40] [Rank 0] Group 1 Loss: 4.5158 +[2025-09-05 19:09:40] [Rank 0] Group 2 Loss: 4.3430 +[2025-09-05 19:09:40] [Rank 0] Group 2 Loss: 4.3430 +[2025-09-05 19:09:40] [Rank 0] Group 3 Loss: 4.8351 +[2025-09-05 19:09:40] [Rank 0] Group 3 Loss: 4.8351 +[2025-09-05 19:09:40] [Rank 0] Group 4 Loss: 4.7340 +[2025-09-05 19:09:40] [Rank 0] Group 4 Loss: 4.7340 +[2025-09-05 19:09:40] [Rank 0] Group 5 Loss: 4.8670 +[2025-09-05 19:09:40] [Rank 0] Group 5 Loss: 4.8670 +[2025-09-05 19:09:40] [Rank 0] Group 6 Loss: 4.7179 +[2025-09-05 19:09:40] [Rank 0] Group 6 Loss: 4.7179 +[2025-09-05 19:09:40] [Rank 0] Group 7 Loss: 4.8196 +[2025-09-05 19:09:40] [Rank 0] Group 7 Loss: 4.8196 +[2025-09-05 19:09:40] [Rank 0] Group 8 Loss: 4.9324 +[2025-09-05 19:09:40] [Rank 0] Group 8 Loss: 4.9324 +[2025-09-05 19:09:40] [Rank 0] Group 9 Loss: 4.8637 +[2025-09-05 19:09:40] [Rank 0] Group 9 Loss: 4.8637 +[2025-09-05 19:09:40] [Rank 0] Group 10 Loss: 5.0295 +[2025-09-05 19:09:40] [Rank 0] Group 10 Loss: 5.0295 +[2025-09-05 19:09:40] [Rank 0] Group 11 Loss: 5.0149 +[2025-09-05 19:09:40] [Rank 0] Group 11 Loss: 5.0149 +[2025-09-05 19:09:40] [Rank 0] Group 12 Loss: 5.0738 +[2025-09-05 19:09:40] [Rank 0] Group 12 Loss: 5.0738 +[2025-09-05 19:09:40] [Rank 0] Group 13 Loss: 5.2112 +[2025-09-05 19:09:40] [Rank 0] Group 13 Loss: 5.2112 +[2025-09-05 19:09:40] [Rank 0] Group 14 Loss: 5.2139 +[2025-09-05 19:09:40] [Rank 0] Group 14 Loss: 5.2139 +[2025-09-05 19:09:40] [Rank 0] Group 15 Loss: 5.3837 +[2025-09-05 19:09:40] [Rank 0] Group 15 Loss: 5.3837 +[2025-09-05 19:09:40] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:09:40] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 19:09:40] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 19:09:40] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:09:40] [Rank 0] Group 9 FTA: 0.9500 +[2025-09-05 19:09:40] [Rank 0] Group 9 FTA: 0.9500 +[2025-09-05 19:09:40] [Rank 0] Group 10 FTA: 0.9500 +[2025-09-05 19:09:40] [Rank 0] Group 10 FTA: 0.9500 +[2025-09-05 19:09:40] [Rank 0] Group 11 FTA: 0.8500 +[2025-09-05 19:09:40] [Rank 0] Group 11 FTA: 0.8500 +[2025-09-05 19:09:40] [Rank 0] Group 12 FTA: 0.3600 +[2025-09-05 19:09:40] [Rank 0] Group 12 FTA: 0.3600 +[2025-09-05 19:09:40] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 19:09:40] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 19:09:40] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-05 19:09:40] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-05 19:09:40] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 19:09:40] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 19:09:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:09:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:09:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:09:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:09:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:09:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:09:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:09:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:09:42] [Rank 0] step:4001/10000 train_time:166183ms step_avg:41.54ms +[2025-09-05 19:09:42] [Rank 0] step:4001/10000 train_time:166183ms step_avg:41.54ms +[2025-09-05 19:09:42] [Rank 0] step:4021/10000 train_time:166728ms step_avg:41.46ms +[2025-09-05 19:09:42] [Rank 0] step:4021/10000 train_time:166728ms step_avg:41.46ms +[2025-09-05 19:09:43] [Rank 0] step:4041/10000 train_time:167392ms step_avg:41.42ms +[2025-09-05 19:09:43] [Rank 0] step:4041/10000 train_time:167392ms step_avg:41.42ms +[2025-09-05 19:09:44] [Rank 0] step:4061/10000 train_time:168056ms step_avg:41.38ms +[2025-09-05 19:09:44] [Rank 0] step:4061/10000 train_time:168056ms step_avg:41.38ms +[2025-09-05 19:09:44] [Rank 0] step:4081/10000 train_time:168720ms step_avg:41.34ms +[2025-09-05 19:09:44] [Rank 0] step:4081/10000 train_time:168720ms step_avg:41.34ms +[2025-09-05 19:09:45] [Rank 0] step:4101/10000 train_time:169385ms step_avg:41.30ms +[2025-09-05 
19:09:45] [Rank 0] step:4101/10000 train_time:169385ms step_avg:41.30ms +[2025-09-05 19:09:46] [Rank 0] step:4121/10000 train_time:170049ms step_avg:41.26ms +[2025-09-05 19:09:46] [Rank 0] step:4121/10000 train_time:170049ms step_avg:41.26ms +[2025-09-05 19:09:46] [Rank 0] step:4141/10000 train_time:170713ms step_avg:41.23ms +[2025-09-05 19:09:46] [Rank 0] step:4141/10000 train_time:170713ms step_avg:41.23ms +[2025-09-05 19:09:47] [Rank 0] step:4161/10000 train_time:171377ms step_avg:41.19ms +[2025-09-05 19:09:47] [Rank 0] step:4161/10000 train_time:171377ms step_avg:41.19ms +[2025-09-05 19:09:48] [Rank 0] step:4181/10000 train_time:172041ms step_avg:41.15ms +[2025-09-05 19:09:48] [Rank 0] step:4181/10000 train_time:172041ms step_avg:41.15ms +[2025-09-05 19:09:48] [Rank 0] step:4201/10000 train_time:172705ms step_avg:41.11ms +[2025-09-05 19:09:48] [Rank 0] step:4201/10000 train_time:172705ms step_avg:41.11ms +[2025-09-05 19:09:49] [Rank 0] step:4221/10000 train_time:173370ms step_avg:41.07ms +[2025-09-05 19:09:49] [Rank 0] step:4221/10000 train_time:173370ms step_avg:41.07ms +[2025-09-05 19:09:50] [Rank 0] step:4241/10000 train_time:174035ms step_avg:41.04ms +[2025-09-05 19:09:50] [Rank 0] step:4241/10000 train_time:174035ms step_avg:41.04ms +[2025-09-05 19:09:50] [Rank 0] step:4261/10000 train_time:174699ms step_avg:41.00ms +[2025-09-05 19:09:50] [Rank 0] step:4261/10000 train_time:174699ms step_avg:41.00ms +[2025-09-05 19:09:51] [Rank 0] step:4281/10000 train_time:175363ms step_avg:40.96ms +[2025-09-05 19:09:51] [Rank 0] step:4281/10000 train_time:175363ms step_avg:40.96ms +[2025-09-05 19:09:52] [Rank 0] step:4301/10000 train_time:176028ms step_avg:40.93ms +[2025-09-05 19:09:52] [Rank 0] step:4301/10000 train_time:176028ms step_avg:40.93ms +[2025-09-05 19:09:52] [Rank 0] step:4321/10000 train_time:176692ms step_avg:40.89ms +[2025-09-05 19:09:52] [Rank 0] step:4321/10000 train_time:176692ms step_avg:40.89ms +[2025-09-05 19:09:53] [Rank 0] step:4341/10000 train_time:177357ms step_avg:40.86ms +[2025-09-05 19:09:53] [Rank 0] step:4341/10000 train_time:177357ms step_avg:40.86ms +[2025-09-05 19:09:54] [Rank 0] step:4361/10000 train_time:178022ms step_avg:40.82ms +[2025-09-05 19:09:54] [Rank 0] step:4361/10000 train_time:178022ms step_avg:40.82ms +[2025-09-05 19:09:54] [Rank 0] step:4381/10000 train_time:178686ms step_avg:40.79ms +[2025-09-05 19:09:54] [Rank 0] step:4381/10000 train_time:178686ms step_avg:40.79ms +[2025-09-05 19:09:55] [Rank 0] step:4401/10000 train_time:179351ms step_avg:40.75ms +[2025-09-05 19:09:55] [Rank 0] step:4401/10000 train_time:179351ms step_avg:40.75ms +[2025-09-05 19:09:56] [Rank 0] step:4421/10000 train_time:180016ms step_avg:40.72ms +[2025-09-05 19:09:56] [Rank 0] step:4421/10000 train_time:180016ms step_avg:40.72ms +[2025-09-05 19:09:56] [Rank 0] step:4441/10000 train_time:180682ms step_avg:40.68ms +[2025-09-05 19:09:56] [Rank 0] step:4441/10000 train_time:180682ms step_avg:40.68ms +[2025-09-05 19:09:57] [Rank 0] step:4461/10000 train_time:181347ms step_avg:40.65ms +[2025-09-05 19:09:57] [Rank 0] step:4461/10000 train_time:181347ms step_avg:40.65ms +[2025-09-05 19:09:58] [Rank 0] step:4481/10000 train_time:182012ms step_avg:40.62ms +[2025-09-05 19:09:58] [Rank 0] step:4481/10000 train_time:182012ms step_avg:40.62ms +[2025-09-05 19:09:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:09:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:09:59] [Rank 0] PRINT: step:4500/10000 train_loss:0.7251 val_loss:0.7096 train_time:182913ms step_avg:40.65ms +[2025-09-05 19:09:59] [Rank 0] PRINT: step:4500/10000 train_loss:0.7251 val_loss:0.7096 train_time:182913ms step_avg:40.65ms +[2025-09-05 19:09:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:09:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:09:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:09:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:11:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:11:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:11:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:11:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:11:20] [Rank 0] Total Loss: 4.9252 +[2025-09-05 19:11:20] [Rank 0] Total Loss: 4.9252 +[2025-09-05 19:11:20] [Rank 0] Total FTA (Unweighted): 0.8031 +[2025-09-05 19:11:20] [Rank 0] Total FTA (Unweighted): 0.8031 +[2025-09-05 19:11:20] [Rank 0] Total FTA (Weighted): 0.8031 +[2025-09-05 19:11:20] [Rank 0] Total FTA (Weighted): 0.8031 +[2025-09-05 19:11:20] [Rank 0] Group 0 Loss: 4.9352 +[2025-09-05 19:11:20] [Rank 0] Group 0 Loss: 4.9352 +[2025-09-05 19:11:20] [Rank 0] Group 1 Loss: 4.5586 +[2025-09-05 19:11:20] [Rank 0] Group 1 Loss: 4.5586 +[2025-09-05 19:11:20] [Rank 0] Group 2 Loss: 4.3089 +[2025-09-05 19:11:20] [Rank 0] Group 2 Loss: 4.3089 +[2025-09-05 19:11:20] [Rank 0] Group 3 Loss: 4.8219 +[2025-09-05 19:11:20] [Rank 0] Group 3 Loss: 4.8219 +[2025-09-05 19:11:20] [Rank 0] Group 4 Loss: 4.7358 +[2025-09-05 19:11:20] [Rank 0] Group 4 Loss: 4.7358 +[2025-09-05 19:11:20] [Rank 0] Group 5 Loss: 4.8811 +[2025-09-05 19:11:20] [Rank 0] Group 5 Loss: 4.8811 +[2025-09-05 19:11:20] [Rank 0] Group 6 Loss: 4.7568 +[2025-09-05 19:11:20] [Rank 0] Group 6 Loss: 4.7568 +[2025-09-05 19:11:20] [Rank 0] Group 7 Loss: 4.8336 +[2025-09-05 19:11:20] [Rank 0] Group 7 Loss: 4.8336 +[2025-09-05 19:11:20] [Rank 0] Group 8 Loss: 4.9922 +[2025-09-05 19:11:20] [Rank 0] Group 8 Loss: 4.9922 +[2025-09-05 19:11:20] [Rank 0] Group 9 Loss: 4.9655 +[2025-09-05 19:11:20] [Rank 0] Group 9 Loss: 4.9655 +[2025-09-05 19:11:20] [Rank 0] Group 10 Loss: 5.0520 +[2025-09-05 19:11:20] [Rank 0] Group 10 Loss: 5.0520 +[2025-09-05 19:11:20] [Rank 0] Group 11 Loss: 5.0882 +[2025-09-05 19:11:20] [Rank 0] Group 11 Loss: 5.0882 +[2025-09-05 19:11:20] [Rank 0] Group 12 Loss: 5.0436 +[2025-09-05 19:11:20] [Rank 0] Group 12 Loss: 5.0436 +[2025-09-05 19:11:20] [Rank 0] Group 13 Loss: 5.2309 +[2025-09-05 19:11:20] [Rank 0] Group 13 Loss: 5.2309 +[2025-09-05 19:11:20] [Rank 0] Group 14 Loss: 5.2395 +[2025-09-05 19:11:20] [Rank 0] Group 14 Loss: 5.2395 +[2025-09-05 19:11:20] [Rank 0] Group 15 Loss: 5.3602 +[2025-09-05 19:11:20] [Rank 0] Group 15 Loss: 5.3602 +[2025-09-05 19:11:20] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:11:20] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:11:20] [Rank 0] Group 9 FTA: 0.9900 +[2025-09-05 19:11:20] [Rank 0] Group 9 FTA: 0.9900 +[2025-09-05 19:11:20] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 19:11:20] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 19:11:20] [Rank 0] Group 11 FTA: 0.9100 +[2025-09-05 19:11:20] [Rank 0] Group 11 FTA: 0.9100 +[2025-09-05 19:11:20] [Rank 0] Group 12 FTA: 0.5100 +[2025-09-05 19:11:20] [Rank 0] Group 12 FTA: 0.5100 +[2025-09-05 19:11:20] [Rank 0] Group 13 FTA: 0.2200 +[2025-09-05 19:11:20] [Rank 0] Group 13 FTA: 0.2200 +[2025-09-05 19:11:20] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 19:11:20] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 19:11:20] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 19:11:20] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 19:11:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:11:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:11:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:11:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:11:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:11:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:11:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:11:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:11:22] [Rank 0] step:4501/10000 train_time:182922ms step_avg:40.64ms +[2025-09-05 19:11:22] [Rank 0] step:4501/10000 train_time:182922ms step_avg:40.64ms +[2025-09-05 19:11:23] [Rank 0] step:4521/10000 train_time:183362ms step_avg:40.56ms +[2025-09-05 19:11:23] [Rank 0] step:4521/10000 train_time:183362ms step_avg:40.56ms +[2025-09-05 19:11:23] [Rank 0] step:4541/10000 train_time:184026ms step_avg:40.53ms +[2025-09-05 19:11:23] [Rank 0] step:4541/10000 train_time:184026ms step_avg:40.53ms +[2025-09-05 19:11:24] [Rank 0] step:4561/10000 train_time:184689ms step_avg:40.49ms +[2025-09-05 19:11:24] [Rank 0] step:4561/10000 train_time:184689ms step_avg:40.49ms +[2025-09-05 19:11:25] [Rank 0] step:4581/10000 train_time:185353ms step_avg:40.46ms +[2025-09-05 19:11:25] [Rank 0] step:4581/10000 train_time:185353ms step_avg:40.46ms +[2025-09-05 19:11:25] [Rank 0] step:4601/10000 train_time:186017ms step_avg:40.43ms +[2025-09-05 
19:11:25] [Rank 0] step:4601/10000 train_time:186017ms step_avg:40.43ms +[2025-09-05 19:11:26] [Rank 0] step:4621/10000 train_time:186681ms step_avg:40.40ms +[2025-09-05 19:11:26] [Rank 0] step:4621/10000 train_time:186681ms step_avg:40.40ms +[2025-09-05 19:11:27] [Rank 0] step:4641/10000 train_time:187345ms step_avg:40.37ms +[2025-09-05 19:11:27] [Rank 0] step:4641/10000 train_time:187345ms step_avg:40.37ms +[2025-09-05 19:11:27] [Rank 0] step:4661/10000 train_time:188009ms step_avg:40.34ms +[2025-09-05 19:11:27] [Rank 0] step:4661/10000 train_time:188009ms step_avg:40.34ms +[2025-09-05 19:11:28] [Rank 0] step:4681/10000 train_time:188673ms step_avg:40.31ms +[2025-09-05 19:11:28] [Rank 0] step:4681/10000 train_time:188673ms step_avg:40.31ms +[2025-09-05 19:11:29] [Rank 0] step:4701/10000 train_time:189337ms step_avg:40.28ms +[2025-09-05 19:11:29] [Rank 0] step:4701/10000 train_time:189337ms step_avg:40.28ms +[2025-09-05 19:11:29] [Rank 0] step:4721/10000 train_time:190000ms step_avg:40.25ms +[2025-09-05 19:11:29] [Rank 0] step:4721/10000 train_time:190000ms step_avg:40.25ms +[2025-09-05 19:11:30] [Rank 0] step:4741/10000 train_time:190664ms step_avg:40.22ms +[2025-09-05 19:11:30] [Rank 0] step:4741/10000 train_time:190664ms step_avg:40.22ms +[2025-09-05 19:11:31] [Rank 0] step:4761/10000 train_time:191327ms step_avg:40.19ms +[2025-09-05 19:11:31] [Rank 0] step:4761/10000 train_time:191327ms step_avg:40.19ms +[2025-09-05 19:11:31] [Rank 0] step:4781/10000 train_time:192206ms step_avg:40.20ms +[2025-09-05 19:11:31] [Rank 0] step:4781/10000 train_time:192206ms step_avg:40.20ms +[2025-09-05 19:11:32] [Rank 0] step:4801/10000 train_time:192869ms step_avg:40.17ms +[2025-09-05 19:11:32] [Rank 0] step:4801/10000 train_time:192869ms step_avg:40.17ms +[2025-09-05 19:11:33] [Rank 0] step:4821/10000 train_time:193533ms step_avg:40.14ms +[2025-09-05 19:11:33] [Rank 0] step:4821/10000 train_time:193533ms step_avg:40.14ms +[2025-09-05 19:11:34] [Rank 0] step:4841/10000 train_time:194344ms step_avg:40.15ms +[2025-09-05 19:11:34] [Rank 0] step:4841/10000 train_time:194344ms step_avg:40.15ms +[2025-09-05 19:11:34] [Rank 0] step:4861/10000 train_time:195007ms step_avg:40.12ms +[2025-09-05 19:11:34] [Rank 0] step:4861/10000 train_time:195007ms step_avg:40.12ms +[2025-09-05 19:11:35] [Rank 0] step:4881/10000 train_time:195670ms step_avg:40.09ms +[2025-09-05 19:11:35] [Rank 0] step:4881/10000 train_time:195670ms step_avg:40.09ms +[2025-09-05 19:11:36] [Rank 0] step:4901/10000 train_time:196333ms step_avg:40.06ms +[2025-09-05 19:11:36] [Rank 0] step:4901/10000 train_time:196333ms step_avg:40.06ms +[2025-09-05 19:11:36] [Rank 0] step:4921/10000 train_time:196997ms step_avg:40.03ms +[2025-09-05 19:11:36] [Rank 0] step:4921/10000 train_time:196997ms step_avg:40.03ms +[2025-09-05 19:11:37] [Rank 0] step:4941/10000 train_time:197660ms step_avg:40.00ms +[2025-09-05 19:11:37] [Rank 0] step:4941/10000 train_time:197660ms step_avg:40.00ms +[2025-09-05 19:11:38] [Rank 0] step:4961/10000 train_time:198323ms step_avg:39.98ms +[2025-09-05 19:11:38] [Rank 0] step:4961/10000 train_time:198323ms step_avg:39.98ms +[2025-09-05 19:11:38] [Rank 0] step:4981/10000 train_time:198986ms step_avg:39.95ms +[2025-09-05 19:11:38] [Rank 0] step:4981/10000 train_time:198986ms step_avg:39.95ms +[2025-09-05 19:11:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:11:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:11:39] [Rank 0] PRINT: step:5000/10000 train_loss:0.7118 val_loss:0.6985 train_time:199885ms step_avg:39.98ms +[2025-09-05 19:11:39] [Rank 0] PRINT: step:5000/10000 train_loss:0.7118 val_loss:0.6985 train_time:199885ms step_avg:39.98ms +[2025-09-05 19:11:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:11:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:11:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:11:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:13:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:13:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:13:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:13:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:13:01] [Rank 0] Total Loss: 5.0328 +[2025-09-05 19:13:01] [Rank 0] Total Loss: 5.0328 +[2025-09-05 19:13:01] [Rank 0] Total FTA (Unweighted): 0.8150 +[2025-09-05 19:13:01] [Rank 0] Total FTA (Unweighted): 0.8150 +[2025-09-05 19:13:01] [Rank 0] Total FTA (Weighted): 0.8150 +[2025-09-05 19:13:01] [Rank 0] Total FTA (Weighted): 0.8150 +[2025-09-05 19:13:01] [Rank 0] Group 0 Loss: 5.1345 +[2025-09-05 19:13:01] [Rank 0] Group 0 Loss: 5.1345 +[2025-09-05 19:13:01] [Rank 0] Group 1 Loss: 4.6983 +[2025-09-05 19:13:01] [Rank 0] Group 1 Loss: 4.6983 +[2025-09-05 19:13:01] [Rank 0] Group 2 Loss: 4.4056 +[2025-09-05 19:13:01] [Rank 0] Group 2 Loss: 4.4056 +[2025-09-05 19:13:01] [Rank 0] Group 3 Loss: 4.9387 +[2025-09-05 19:13:01] [Rank 0] Group 3 Loss: 4.9387 +[2025-09-05 19:13:01] [Rank 0] Group 4 Loss: 4.9062 +[2025-09-05 19:13:01] [Rank 0] Group 4 Loss: 4.9062 +[2025-09-05 19:13:01] [Rank 0] Group 5 Loss: 5.0208 +[2025-09-05 19:13:01] [Rank 0] Group 5 Loss: 5.0208 +[2025-09-05 19:13:01] [Rank 0] Group 6 Loss: 4.8875 +[2025-09-05 19:13:01] [Rank 0] Group 6 Loss: 4.8875 +[2025-09-05 19:13:01] [Rank 0] Group 7 Loss: 4.9936 +[2025-09-05 19:13:01] [Rank 0] Group 7 Loss: 4.9936 +[2025-09-05 19:13:01] [Rank 0] Group 8 Loss: 5.0857 +[2025-09-05 19:13:01] [Rank 0] Group 8 Loss: 5.0857 +[2025-09-05 19:13:01] [Rank 0] Group 9 Loss: 5.0073 +[2025-09-05 19:13:01] [Rank 0] Group 9 Loss: 5.0073 +[2025-09-05 19:13:01] [Rank 0] Group 10 Loss: 5.1473 +[2025-09-05 19:13:01] [Rank 0] Group 10 Loss: 5.1473 +[2025-09-05 19:13:01] [Rank 0] Group 11 Loss: 5.1430 +[2025-09-05 19:13:01] [Rank 0] Group 11 Loss: 5.1430 +[2025-09-05 19:13:01] [Rank 0] Group 12 Loss: 5.1665 +[2025-09-05 19:13:01] [Rank 0] Group 12 Loss: 5.1665 +[2025-09-05 19:13:01] [Rank 0] Group 13 Loss: 5.3530 +[2025-09-05 19:13:01] [Rank 0] Group 13 Loss: 5.3530 +[2025-09-05 19:13:01] [Rank 0] Group 14 Loss: 5.2278 +[2025-09-05 19:13:01] [Rank 0] Group 14 Loss: 5.2278 +[2025-09-05 19:13:01] [Rank 0] Group 15 Loss: 5.4098 +[2025-09-05 19:13:01] [Rank 0] Group 15 Loss: 5.4098 +[2025-09-05 19:13:01] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:13:01] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:13:01] [Rank 0] Group 11 FTA: 0.9200 +[2025-09-05 19:13:01] [Rank 0] Group 11 FTA: 0.9200 +[2025-09-05 19:13:01] [Rank 0] Group 12 FTA: 0.6200 +[2025-09-05 19:13:01] [Rank 0] Group 12 FTA: 0.6200 +[2025-09-05 19:13:01] [Rank 0] Group 13 FTA: 0.2400 +[2025-09-05 19:13:01] [Rank 0] Group 13 FTA: 0.2400 +[2025-09-05 19:13:01] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 19:13:01] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 19:13:01] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 19:13:01] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 19:13:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:13:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:13:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:13:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:13:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:13:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:13:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:13:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:13:03] [Rank 0] step:5001/10000 train_time:199894ms step_avg:39.97ms +[2025-09-05 19:13:03] [Rank 0] step:5001/10000 train_time:199894ms step_avg:39.97ms +[2025-09-05 19:13:04] [Rank 0] step:5021/10000 train_time:200348ms step_avg:39.90ms +[2025-09-05 19:13:04] [Rank 0] step:5021/10000 train_time:200348ms step_avg:39.90ms +[2025-09-05 19:13:04] [Rank 0] step:5041/10000 train_time:201012ms step_avg:39.88ms +[2025-09-05 19:13:04] [Rank 0] step:5041/10000 train_time:201012ms step_avg:39.88ms +[2025-09-05 19:13:05] [Rank 0] step:5061/10000 train_time:201676ms step_avg:39.85ms +[2025-09-05 19:13:05] [Rank 0] step:5061/10000 train_time:201676ms step_avg:39.85ms +[2025-09-05 19:13:06] [Rank 0] step:5081/10000 train_time:202340ms step_avg:39.82ms +[2025-09-05 19:13:06] [Rank 0] step:5081/10000 train_time:202340ms step_avg:39.82ms +[2025-09-05 19:13:06] [Rank 0] step:5101/10000 train_time:203005ms step_avg:39.80ms +[2025-09-05 
19:13:06] [Rank 0] step:5101/10000 train_time:203005ms step_avg:39.80ms +[2025-09-05 19:13:07] [Rank 0] step:5121/10000 train_time:203670ms step_avg:39.77ms +[2025-09-05 19:13:07] [Rank 0] step:5121/10000 train_time:203670ms step_avg:39.77ms +[2025-09-05 19:13:08] [Rank 0] step:5141/10000 train_time:204335ms step_avg:39.75ms +[2025-09-05 19:13:08] [Rank 0] step:5141/10000 train_time:204335ms step_avg:39.75ms +[2025-09-05 19:13:08] [Rank 0] step:5161/10000 train_time:205000ms step_avg:39.72ms +[2025-09-05 19:13:08] [Rank 0] step:5161/10000 train_time:205000ms step_avg:39.72ms +[2025-09-05 19:13:09] [Rank 0] step:5181/10000 train_time:205665ms step_avg:39.70ms +[2025-09-05 19:13:09] [Rank 0] step:5181/10000 train_time:205665ms step_avg:39.70ms +[2025-09-05 19:13:10] [Rank 0] step:5201/10000 train_time:206331ms step_avg:39.67ms +[2025-09-05 19:13:10] [Rank 0] step:5201/10000 train_time:206331ms step_avg:39.67ms +[2025-09-05 19:13:10] [Rank 0] step:5221/10000 train_time:206995ms step_avg:39.65ms +[2025-09-05 19:13:10] [Rank 0] step:5221/10000 train_time:206995ms step_avg:39.65ms +[2025-09-05 19:13:11] [Rank 0] step:5241/10000 train_time:207659ms step_avg:39.62ms +[2025-09-05 19:13:11] [Rank 0] step:5241/10000 train_time:207659ms step_avg:39.62ms +[2025-09-05 19:13:12] [Rank 0] step:5261/10000 train_time:208324ms step_avg:39.60ms +[2025-09-05 19:13:12] [Rank 0] step:5261/10000 train_time:208324ms step_avg:39.60ms +[2025-09-05 19:13:12] [Rank 0] step:5281/10000 train_time:208988ms step_avg:39.57ms +[2025-09-05 19:13:12] [Rank 0] step:5281/10000 train_time:208988ms step_avg:39.57ms +[2025-09-05 19:13:13] [Rank 0] step:5301/10000 train_time:209653ms step_avg:39.55ms +[2025-09-05 19:13:13] [Rank 0] step:5301/10000 train_time:209653ms step_avg:39.55ms +[2025-09-05 19:13:14] [Rank 0] step:5321/10000 train_time:210318ms step_avg:39.53ms +[2025-09-05 19:13:14] [Rank 0] step:5321/10000 train_time:210318ms step_avg:39.53ms +[2025-09-05 19:13:14] [Rank 0] step:5341/10000 train_time:210983ms step_avg:39.50ms +[2025-09-05 19:13:14] [Rank 0] step:5341/10000 train_time:210983ms step_avg:39.50ms +[2025-09-05 19:13:15] [Rank 0] step:5361/10000 train_time:211648ms step_avg:39.48ms +[2025-09-05 19:13:15] [Rank 0] step:5361/10000 train_time:211648ms step_avg:39.48ms +[2025-09-05 19:13:16] [Rank 0] step:5381/10000 train_time:212313ms step_avg:39.46ms +[2025-09-05 19:13:16] [Rank 0] step:5381/10000 train_time:212313ms step_avg:39.46ms +[2025-09-05 19:13:16] [Rank 0] step:5401/10000 train_time:212978ms step_avg:39.43ms +[2025-09-05 19:13:16] [Rank 0] step:5401/10000 train_time:212978ms step_avg:39.43ms +[2025-09-05 19:13:17] [Rank 0] step:5421/10000 train_time:213643ms step_avg:39.41ms +[2025-09-05 19:13:17] [Rank 0] step:5421/10000 train_time:213643ms step_avg:39.41ms +[2025-09-05 19:13:18] [Rank 0] step:5441/10000 train_time:214309ms step_avg:39.39ms +[2025-09-05 19:13:18] [Rank 0] step:5441/10000 train_time:214309ms step_avg:39.39ms +[2025-09-05 19:13:18] [Rank 0] step:5461/10000 train_time:214975ms step_avg:39.37ms +[2025-09-05 19:13:18] [Rank 0] step:5461/10000 train_time:214975ms step_avg:39.37ms +[2025-09-05 19:13:19] [Rank 0] step:5481/10000 train_time:215640ms step_avg:39.34ms +[2025-09-05 19:13:19] [Rank 0] step:5481/10000 train_time:215640ms step_avg:39.34ms +[2025-09-05 19:13:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:13:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:13:20] [Rank 0] PRINT: step:5500/10000 train_loss:0.7007 val_loss:0.6888 train_time:216541ms step_avg:39.37ms +[2025-09-05 19:13:20] [Rank 0] PRINT: step:5500/10000 train_loss:0.7007 val_loss:0.6888 train_time:216541ms step_avg:39.37ms +[2025-09-05 19:13:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:13:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:13:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:13:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:14:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:14:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:14:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:14:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:14:41] [Rank 0] Total Loss: 4.9923 +[2025-09-05 19:14:41] [Rank 0] Total Loss: 4.9923 +[2025-09-05 19:14:41] [Rank 0] Total FTA (Unweighted): 0.8344 +[2025-09-05 19:14:41] [Rank 0] Total FTA (Unweighted): 0.8344 +[2025-09-05 19:14:41] [Rank 0] Total FTA (Weighted): 0.8344 +[2025-09-05 19:14:41] [Rank 0] Total FTA (Weighted): 0.8344 +[2025-09-05 19:14:41] [Rank 0] Group 0 Loss: 5.0015 +[2025-09-05 19:14:41] [Rank 0] Group 0 Loss: 5.0015 +[2025-09-05 19:14:41] [Rank 0] Group 1 Loss: 4.7375 +[2025-09-05 19:14:41] [Rank 0] Group 1 Loss: 4.7375 +[2025-09-05 19:14:41] [Rank 0] Group 2 Loss: 4.4648 +[2025-09-05 19:14:41] [Rank 0] Group 2 Loss: 4.4648 +[2025-09-05 19:14:41] [Rank 0] Group 3 Loss: 4.8453 +[2025-09-05 19:14:41] [Rank 0] Group 3 Loss: 4.8453 +[2025-09-05 19:14:41] [Rank 0] Group 4 Loss: 4.7899 +[2025-09-05 19:14:41] [Rank 0] Group 4 Loss: 4.7899 +[2025-09-05 19:14:41] [Rank 0] Group 5 Loss: 4.9505 +[2025-09-05 19:14:41] [Rank 0] Group 5 Loss: 4.9505 +[2025-09-05 19:14:41] [Rank 0] Group 6 Loss: 4.8498 +[2025-09-05 19:14:41] [Rank 0] Group 6 Loss: 4.8498 +[2025-09-05 19:14:41] [Rank 0] Group 7 Loss: 4.9156 +[2025-09-05 19:14:41] [Rank 0] Group 7 Loss: 4.9156 +[2025-09-05 19:14:41] [Rank 0] Group 8 Loss: 5.0921 +[2025-09-05 19:14:41] [Rank 0] Group 8 Loss: 5.0921 +[2025-09-05 19:14:41] [Rank 0] Group 9 Loss: 5.0339 +[2025-09-05 19:14:41] [Rank 0] Group 9 Loss: 5.0339 +[2025-09-05 19:14:41] [Rank 0] Group 10 Loss: 5.1529 +[2025-09-05 19:14:41] [Rank 0] Group 10 Loss: 5.1529 +[2025-09-05 19:14:41] [Rank 0] Group 11 Loss: 5.1055 +[2025-09-05 19:14:41] [Rank 0] Group 11 Loss: 5.1055 +[2025-09-05 19:14:41] [Rank 0] Group 12 Loss: 5.1051 +[2025-09-05 19:14:41] [Rank 0] Group 12 Loss: 5.1051 +[2025-09-05 19:14:41] [Rank 0] Group 13 Loss: 5.2604 +[2025-09-05 19:14:41] [Rank 0] Group 13 Loss: 5.2604 +[2025-09-05 19:14:42] [Rank 0] Group 14 Loss: 5.2187 +[2025-09-05 19:14:42] [Rank 0] Group 14 Loss: 5.2187 +[2025-09-05 19:14:42] [Rank 0] Group 15 Loss: 5.3538 +[2025-09-05 19:14:42] [Rank 0] Group 15 Loss: 5.3538 +[2025-09-05 19:14:42] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:14:42] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:14:42] [Rank 0] Group 11 FTA: 0.9500 +[2025-09-05 19:14:42] [Rank 0] Group 11 FTA: 0.9500 +[2025-09-05 19:14:42] [Rank 0] Group 12 FTA: 0.8000 +[2025-09-05 19:14:42] [Rank 0] Group 12 FTA: 0.8000 +[2025-09-05 19:14:42] [Rank 0] Group 13 FTA: 0.3500 +[2025-09-05 19:14:42] [Rank 0] Group 13 FTA: 0.3500 +[2025-09-05 19:14:42] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 19:14:42] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 19:14:42] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 19:14:42] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 19:14:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:14:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:14:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:14:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:14:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:14:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:14:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:14:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:14:43] [Rank 0] step:5501/10000 train_time:216549ms step_avg:39.37ms +[2025-09-05 19:14:43] [Rank 0] step:5501/10000 train_time:216549ms step_avg:39.37ms +[2025-09-05 19:14:44] [Rank 0] step:5521/10000 train_time:217002ms step_avg:39.30ms +[2025-09-05 19:14:44] [Rank 0] step:5521/10000 train_time:217002ms step_avg:39.30ms +[2025-09-05 19:14:44] [Rank 0] step:5541/10000 train_time:217828ms step_avg:39.31ms +[2025-09-05 19:14:44] [Rank 0] step:5541/10000 train_time:217828ms step_avg:39.31ms +[2025-09-05 19:14:45] [Rank 0] step:5561/10000 train_time:218492ms step_avg:39.29ms +[2025-09-05 19:14:45] [Rank 0] step:5561/10000 train_time:218492ms step_avg:39.29ms +[2025-09-05 19:14:46] [Rank 0] step:5581/10000 train_time:219156ms step_avg:39.27ms +[2025-09-05 19:14:46] [Rank 0] step:5581/10000 train_time:219156ms step_avg:39.27ms +[2025-09-05 19:14:46] [Rank 0] step:5601/10000 train_time:219820ms step_avg:39.25ms +[2025-09-05 
19:14:46] [Rank 0] step:5601/10000 train_time:219820ms step_avg:39.25ms +[2025-09-05 19:14:47] [Rank 0] step:5621/10000 train_time:220483ms step_avg:39.22ms +[2025-09-05 19:14:47] [Rank 0] step:5621/10000 train_time:220483ms step_avg:39.22ms +[2025-09-05 19:14:48] [Rank 0] step:5641/10000 train_time:221146ms step_avg:39.20ms +[2025-09-05 19:14:48] [Rank 0] step:5641/10000 train_time:221146ms step_avg:39.20ms +[2025-09-05 19:14:49] [Rank 0] step:5661/10000 train_time:222286ms step_avg:39.27ms +[2025-09-05 19:14:49] [Rank 0] step:5661/10000 train_time:222286ms step_avg:39.27ms +[2025-09-05 19:14:50] [Rank 0] step:5681/10000 train_time:222950ms step_avg:39.24ms +[2025-09-05 19:14:50] [Rank 0] step:5681/10000 train_time:222950ms step_avg:39.24ms +[2025-09-05 19:14:50] [Rank 0] step:5701/10000 train_time:223613ms step_avg:39.22ms +[2025-09-05 19:14:50] [Rank 0] step:5701/10000 train_time:223613ms step_avg:39.22ms +[2025-09-05 19:14:51] [Rank 0] step:5721/10000 train_time:224275ms step_avg:39.20ms +[2025-09-05 19:14:51] [Rank 0] step:5721/10000 train_time:224275ms step_avg:39.20ms +[2025-09-05 19:14:52] [Rank 0] step:5741/10000 train_time:224938ms step_avg:39.18ms +[2025-09-05 19:14:52] [Rank 0] step:5741/10000 train_time:224938ms step_avg:39.18ms +[2025-09-05 19:14:52] [Rank 0] step:5761/10000 train_time:225602ms step_avg:39.16ms +[2025-09-05 19:14:52] [Rank 0] step:5761/10000 train_time:225602ms step_avg:39.16ms +[2025-09-05 19:14:53] [Rank 0] step:5781/10000 train_time:226266ms step_avg:39.14ms +[2025-09-05 19:14:53] [Rank 0] step:5781/10000 train_time:226266ms step_avg:39.14ms +[2025-09-05 19:14:54] [Rank 0] step:5801/10000 train_time:226930ms step_avg:39.12ms +[2025-09-05 19:14:54] [Rank 0] step:5801/10000 train_time:226930ms step_avg:39.12ms +[2025-09-05 19:14:54] [Rank 0] step:5821/10000 train_time:227593ms step_avg:39.10ms +[2025-09-05 19:14:54] [Rank 0] step:5821/10000 train_time:227593ms step_avg:39.10ms +[2025-09-05 19:14:55] [Rank 0] step:5841/10000 train_time:228258ms step_avg:39.08ms +[2025-09-05 19:14:55] [Rank 0] step:5841/10000 train_time:228258ms step_avg:39.08ms +[2025-09-05 19:14:56] [Rank 0] step:5861/10000 train_time:228920ms step_avg:39.06ms +[2025-09-05 19:14:56] [Rank 0] step:5861/10000 train_time:228920ms step_avg:39.06ms +[2025-09-05 19:14:56] [Rank 0] step:5881/10000 train_time:229583ms step_avg:39.04ms +[2025-09-05 19:14:56] [Rank 0] step:5881/10000 train_time:229583ms step_avg:39.04ms +[2025-09-05 19:14:57] [Rank 0] step:5901/10000 train_time:230245ms step_avg:39.02ms +[2025-09-05 19:14:57] [Rank 0] step:5901/10000 train_time:230245ms step_avg:39.02ms +[2025-09-05 19:14:58] [Rank 0] step:5921/10000 train_time:230909ms step_avg:39.00ms +[2025-09-05 19:14:58] [Rank 0] step:5921/10000 train_time:230909ms step_avg:39.00ms +[2025-09-05 19:14:58] [Rank 0] step:5941/10000 train_time:231572ms step_avg:38.98ms +[2025-09-05 19:14:58] [Rank 0] step:5941/10000 train_time:231572ms step_avg:38.98ms +[2025-09-05 19:14:59] [Rank 0] step:5961/10000 train_time:232236ms step_avg:38.96ms +[2025-09-05 19:14:59] [Rank 0] step:5961/10000 train_time:232236ms step_avg:38.96ms +[2025-09-05 19:15:00] [Rank 0] step:5981/10000 train_time:232901ms step_avg:38.94ms +[2025-09-05 19:15:00] [Rank 0] step:5981/10000 train_time:232901ms step_avg:38.94ms +[2025-09-05 19:15:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:15:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:15:01] [Rank 0] PRINT: step:6000/10000 train_loss:0.6918 val_loss:0.6798 train_time:233806ms step_avg:38.97ms +[2025-09-05 19:15:01] [Rank 0] PRINT: step:6000/10000 train_loss:0.6918 val_loss:0.6798 train_time:233806ms step_avg:38.97ms +[2025-09-05 19:15:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:15:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:15:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:15:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:16:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:16:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:16:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:16:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:16:22] [Rank 0] Total Loss: 4.9929 +[2025-09-05 19:16:22] [Rank 0] Total Loss: 4.9929 +[2025-09-05 19:16:22] [Rank 0] Total FTA (Unweighted): 0.8425 +[2025-09-05 19:16:22] [Rank 0] Total FTA (Unweighted): 0.8425 +[2025-09-05 19:16:22] [Rank 0] Total FTA (Weighted): 0.8425 +[2025-09-05 19:16:22] [Rank 0] Total FTA (Weighted): 0.8425 +[2025-09-05 19:16:22] [Rank 0] Group 0 Loss: 5.1730 +[2025-09-05 19:16:22] [Rank 0] Group 0 Loss: 5.1730 +[2025-09-05 19:16:22] [Rank 0] Group 1 Loss: 4.6475 +[2025-09-05 19:16:22] [Rank 0] Group 1 Loss: 4.6475 +[2025-09-05 19:16:22] [Rank 0] Group 2 Loss: 4.5028 +[2025-09-05 19:16:22] [Rank 0] Group 2 Loss: 4.5028 +[2025-09-05 19:16:22] [Rank 0] Group 3 Loss: 4.9049 +[2025-09-05 19:16:22] [Rank 0] Group 3 Loss: 4.9049 +[2025-09-05 19:16:22] [Rank 0] Group 4 Loss: 4.7845 +[2025-09-05 19:16:22] [Rank 0] Group 4 Loss: 4.7845 +[2025-09-05 19:16:22] [Rank 0] Group 5 Loss: 4.9696 +[2025-09-05 19:16:22] [Rank 0] Group 5 Loss: 4.9696 +[2025-09-05 19:16:22] [Rank 0] Group 6 Loss: 4.8363 +[2025-09-05 19:16:22] [Rank 0] Group 6 Loss: 4.8363 +[2025-09-05 19:16:22] [Rank 0] Group 7 Loss: 4.9515 +[2025-09-05 19:16:22] [Rank 0] Group 7 Loss: 4.9515 +[2025-09-05 19:16:22] [Rank 0] Group 8 Loss: 5.0882 +[2025-09-05 19:16:22] [Rank 0] Group 8 Loss: 5.0882 +[2025-09-05 19:16:22] [Rank 0] Group 9 Loss: 5.0415 +[2025-09-05 19:16:22] [Rank 0] Group 9 Loss: 5.0415 +[2025-09-05 19:16:22] [Rank 0] Group 10 Loss: 5.1616 +[2025-09-05 19:16:22] [Rank 0] Group 10 Loss: 5.1616 +[2025-09-05 19:16:22] [Rank 0] Group 11 Loss: 5.0944 +[2025-09-05 19:16:22] [Rank 0] Group 11 Loss: 5.0944 +[2025-09-05 19:16:22] [Rank 0] Group 12 Loss: 5.0650 +[2025-09-05 19:16:22] [Rank 0] Group 12 Loss: 5.0650 +[2025-09-05 19:16:22] [Rank 0] Group 13 Loss: 5.2049 +[2025-09-05 19:16:22] [Rank 0] Group 13 Loss: 5.2049 +[2025-09-05 19:16:22] [Rank 0] Group 14 Loss: 5.1992 +[2025-09-05 19:16:22] [Rank 0] Group 14 Loss: 5.1992 +[2025-09-05 19:16:22] [Rank 0] Group 15 Loss: 5.2619 +[2025-09-05 19:16:22] [Rank 0] Group 15 Loss: 5.2619 +[2025-09-05 19:16:22] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:16:22] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 8 FTA: 0.9800 +[2025-09-05 19:16:22] [Rank 0] Group 8 FTA: 0.9800 +[2025-09-05 19:16:22] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:16:22] [Rank 0] Group 12 FTA: 0.8300 +[2025-09-05 19:16:22] [Rank 0] Group 12 FTA: 0.8300 +[2025-09-05 19:16:22] [Rank 0] Group 13 FTA: 0.3700 +[2025-09-05 19:16:22] [Rank 0] Group 13 FTA: 0.3700 +[2025-09-05 19:16:22] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 19:16:22] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 19:16:22] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-05 19:16:22] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-05 19:16:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:16:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:16:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:16:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:16:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:16:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:16:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:16:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:16:24] [Rank 0] step:6001/10000 train_time:233815ms step_avg:38.96ms +[2025-09-05 19:16:24] [Rank 0] step:6001/10000 train_time:233815ms step_avg:38.96ms +[2025-09-05 19:16:25] [Rank 0] step:6021/10000 train_time:234713ms step_avg:38.98ms +[2025-09-05 19:16:25] [Rank 0] step:6021/10000 train_time:234713ms step_avg:38.98ms +[2025-09-05 19:16:25] [Rank 0] step:6041/10000 train_time:235376ms step_avg:38.96ms +[2025-09-05 19:16:25] [Rank 0] step:6041/10000 train_time:235376ms step_avg:38.96ms +[2025-09-05 19:16:26] [Rank 0] step:6061/10000 train_time:236039ms step_avg:38.94ms +[2025-09-05 19:16:26] [Rank 0] step:6061/10000 train_time:236039ms step_avg:38.94ms +[2025-09-05 19:16:27] [Rank 0] step:6081/10000 train_time:236702ms step_avg:38.92ms +[2025-09-05 19:16:27] [Rank 0] step:6081/10000 train_time:236702ms step_avg:38.92ms +[2025-09-05 19:16:27] [Rank 0] step:6101/10000 train_time:237367ms step_avg:38.91ms +[2025-09-05 
19:16:27] [Rank 0] step:6101/10000 train_time:237367ms step_avg:38.91ms +[2025-09-05 19:16:28] [Rank 0] step:6121/10000 train_time:238032ms step_avg:38.89ms +[2025-09-05 19:16:28] [Rank 0] step:6121/10000 train_time:238032ms step_avg:38.89ms +[2025-09-05 19:16:29] [Rank 0] step:6141/10000 train_time:238696ms step_avg:38.87ms +[2025-09-05 19:16:29] [Rank 0] step:6141/10000 train_time:238696ms step_avg:38.87ms +[2025-09-05 19:16:29] [Rank 0] step:6161/10000 train_time:239361ms step_avg:38.85ms +[2025-09-05 19:16:29] [Rank 0] step:6161/10000 train_time:239361ms step_avg:38.85ms +[2025-09-05 19:16:30] [Rank 0] step:6181/10000 train_time:240025ms step_avg:38.83ms +[2025-09-05 19:16:30] [Rank 0] step:6181/10000 train_time:240025ms step_avg:38.83ms +[2025-09-05 19:16:31] [Rank 0] step:6201/10000 train_time:240690ms step_avg:38.81ms +[2025-09-05 19:16:31] [Rank 0] step:6201/10000 train_time:240690ms step_avg:38.81ms +[2025-09-05 19:16:31] [Rank 0] step:6221/10000 train_time:241353ms step_avg:38.80ms +[2025-09-05 19:16:31] [Rank 0] step:6221/10000 train_time:241353ms step_avg:38.80ms +[2025-09-05 19:16:32] [Rank 0] step:6241/10000 train_time:242019ms step_avg:38.78ms +[2025-09-05 19:16:32] [Rank 0] step:6241/10000 train_time:242019ms step_avg:38.78ms +[2025-09-05 19:16:33] [Rank 0] step:6261/10000 train_time:242684ms step_avg:38.76ms +[2025-09-05 19:16:33] [Rank 0] step:6261/10000 train_time:242684ms step_avg:38.76ms +[2025-09-05 19:16:33] [Rank 0] step:6281/10000 train_time:243349ms step_avg:38.74ms +[2025-09-05 19:16:33] [Rank 0] step:6281/10000 train_time:243349ms step_avg:38.74ms +[2025-09-05 19:16:34] [Rank 0] step:6301/10000 train_time:244013ms step_avg:38.73ms +[2025-09-05 19:16:34] [Rank 0] step:6301/10000 train_time:244013ms step_avg:38.73ms +[2025-09-05 19:16:35] [Rank 0] step:6321/10000 train_time:244678ms step_avg:38.71ms +[2025-09-05 19:16:35] [Rank 0] step:6321/10000 train_time:244678ms step_avg:38.71ms +[2025-09-05 19:16:35] [Rank 0] step:6341/10000 train_time:245343ms step_avg:38.69ms +[2025-09-05 19:16:35] [Rank 0] step:6341/10000 train_time:245343ms step_avg:38.69ms +[2025-09-05 19:16:36] [Rank 0] step:6361/10000 train_time:246008ms step_avg:38.67ms +[2025-09-05 19:16:36] [Rank 0] step:6361/10000 train_time:246008ms step_avg:38.67ms +[2025-09-05 19:16:37] [Rank 0] step:6381/10000 train_time:246673ms step_avg:38.66ms +[2025-09-05 19:16:37] [Rank 0] step:6381/10000 train_time:246673ms step_avg:38.66ms +[2025-09-05 19:16:37] [Rank 0] step:6401/10000 train_time:247339ms step_avg:38.64ms +[2025-09-05 19:16:37] [Rank 0] step:6401/10000 train_time:247339ms step_avg:38.64ms +[2025-09-05 19:16:38] [Rank 0] step:6421/10000 train_time:248005ms step_avg:38.62ms +[2025-09-05 19:16:38] [Rank 0] step:6421/10000 train_time:248005ms step_avg:38.62ms +[2025-09-05 19:16:39] [Rank 0] step:6441/10000 train_time:248669ms step_avg:38.61ms +[2025-09-05 19:16:39] [Rank 0] step:6441/10000 train_time:248669ms step_avg:38.61ms +[2025-09-05 19:16:39] [Rank 0] step:6461/10000 train_time:249334ms step_avg:38.59ms +[2025-09-05 19:16:39] [Rank 0] step:6461/10000 train_time:249334ms step_avg:38.59ms +[2025-09-05 19:16:40] [Rank 0] step:6481/10000 train_time:250000ms step_avg:38.57ms +[2025-09-05 19:16:40] [Rank 0] step:6481/10000 train_time:250000ms step_avg:38.57ms +[2025-09-05 19:16:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:16:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:16:41] [Rank 0] PRINT: step:6500/10000 train_loss:0.6841 val_loss:0.6725 train_time:250902ms step_avg:38.60ms +[2025-09-05 19:16:41] [Rank 0] PRINT: step:6500/10000 train_loss:0.6841 val_loss:0.6725 train_time:250902ms step_avg:38.60ms +[2025-09-05 19:16:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:16:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:16:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:16:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:18:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:18:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:18:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:18:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:18:03] [Rank 0] Total Loss: 4.9712 +[2025-09-05 19:18:03] [Rank 0] Total Loss: 4.9712 +[2025-09-05 19:18:03] [Rank 0] Total FTA (Unweighted): 0.8600 +[2025-09-05 19:18:03] [Rank 0] Total FTA (Unweighted): 0.8600 +[2025-09-05 19:18:03] [Rank 0] Total FTA (Weighted): 0.8600 +[2025-09-05 19:18:03] [Rank 0] Total FTA (Weighted): 0.8600 +[2025-09-05 19:18:03] [Rank 0] Group 0 Loss: 5.2970 +[2025-09-05 19:18:03] [Rank 0] Group 0 Loss: 5.2970 +[2025-09-05 19:18:03] [Rank 0] Group 1 Loss: 4.6217 +[2025-09-05 19:18:03] [Rank 0] Group 1 Loss: 4.6217 +[2025-09-05 19:18:03] [Rank 0] Group 2 Loss: 4.4118 +[2025-09-05 19:18:03] [Rank 0] Group 2 Loss: 4.4118 +[2025-09-05 19:18:03] [Rank 0] Group 3 Loss: 4.9070 +[2025-09-05 19:18:03] [Rank 0] Group 3 Loss: 4.9070 +[2025-09-05 19:18:03] [Rank 0] Group 4 Loss: 4.7982 +[2025-09-05 19:18:03] [Rank 0] Group 4 Loss: 4.7982 +[2025-09-05 19:18:03] [Rank 0] Group 5 Loss: 4.9357 +[2025-09-05 19:18:03] [Rank 0] Group 5 Loss: 4.9357 +[2025-09-05 19:18:03] [Rank 0] Group 6 Loss: 4.8206 +[2025-09-05 19:18:03] [Rank 0] Group 6 Loss: 4.8206 +[2025-09-05 19:18:03] [Rank 0] Group 7 Loss: 4.8720 +[2025-09-05 19:18:03] [Rank 0] Group 7 Loss: 4.8720 +[2025-09-05 19:18:03] [Rank 0] Group 8 Loss: 5.0623 +[2025-09-05 19:18:03] [Rank 0] Group 8 Loss: 5.0623 +[2025-09-05 19:18:03] [Rank 0] Group 9 Loss: 4.9977 +[2025-09-05 19:18:03] [Rank 0] Group 9 Loss: 4.9977 +[2025-09-05 19:18:03] [Rank 0] Group 10 Loss: 5.1105 +[2025-09-05 19:18:03] [Rank 0] Group 10 Loss: 5.1105 +[2025-09-05 19:18:03] [Rank 0] Group 11 Loss: 5.0771 +[2025-09-05 19:18:03] [Rank 0] Group 11 Loss: 5.0771 +[2025-09-05 19:18:03] [Rank 0] Group 12 Loss: 5.0380 +[2025-09-05 19:18:03] [Rank 0] Group 12 Loss: 5.0380 +[2025-09-05 19:18:03] [Rank 0] Group 13 Loss: 5.1547 +[2025-09-05 19:18:03] [Rank 0] Group 13 Loss: 5.1547 +[2025-09-05 19:18:03] [Rank 0] Group 14 Loss: 5.1971 +[2025-09-05 19:18:03] [Rank 0] Group 14 Loss: 5.1971 +[2025-09-05 19:18:03] [Rank 0] Group 15 Loss: 5.2376 +[2025-09-05 19:18:03] [Rank 0] Group 15 Loss: 5.2376 +[2025-09-05 19:18:03] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:18:03] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:18:03] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-05 19:18:03] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-05 19:18:03] [Rank 0] Group 12 FTA: 0.9200 +[2025-09-05 19:18:03] [Rank 0] Group 12 FTA: 0.9200 +[2025-09-05 19:18:03] [Rank 0] Group 13 FTA: 0.5400 +[2025-09-05 19:18:03] [Rank 0] Group 13 FTA: 0.5400 +[2025-09-05 19:18:03] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 19:18:03] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 19:18:03] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 19:18:03] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 19:18:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:18:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:18:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:18:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:18:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:18:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:18:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:18:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:18:05] [Rank 0] step:6501/10000 train_time:250910ms step_avg:38.60ms +[2025-09-05 19:18:05] [Rank 0] step:6501/10000 train_time:250910ms step_avg:38.60ms +[2025-09-05 19:18:05] [Rank 0] step:6521/10000 train_time:251347ms step_avg:38.54ms +[2025-09-05 19:18:05] [Rank 0] step:6521/10000 train_time:251347ms step_avg:38.54ms +[2025-09-05 19:18:06] [Rank 0] step:6541/10000 train_time:252010ms step_avg:38.53ms +[2025-09-05 19:18:06] [Rank 0] step:6541/10000 train_time:252010ms step_avg:38.53ms +[2025-09-05 19:18:07] [Rank 0] step:6561/10000 train_time:252674ms step_avg:38.51ms +[2025-09-05 19:18:07] [Rank 0] step:6561/10000 train_time:252674ms step_avg:38.51ms +[2025-09-05 19:18:07] [Rank 0] step:6581/10000 train_time:253339ms step_avg:38.50ms +[2025-09-05 19:18:07] [Rank 0] step:6581/10000 train_time:253339ms step_avg:38.50ms +[2025-09-05 19:18:08] [Rank 0] step:6601/10000 train_time:254003ms step_avg:38.48ms +[2025-09-05 
19:18:08] [Rank 0] step:6601/10000 train_time:254003ms step_avg:38.48ms +[2025-09-05 19:18:09] [Rank 0] step:6621/10000 train_time:254666ms step_avg:38.46ms +[2025-09-05 19:18:09] [Rank 0] step:6621/10000 train_time:254666ms step_avg:38.46ms +[2025-09-05 19:18:09] [Rank 0] step:6641/10000 train_time:255330ms step_avg:38.45ms +[2025-09-05 19:18:09] [Rank 0] step:6641/10000 train_time:255330ms step_avg:38.45ms +[2025-09-05 19:18:10] [Rank 0] step:6661/10000 train_time:255993ms step_avg:38.43ms +[2025-09-05 19:18:10] [Rank 0] step:6661/10000 train_time:255993ms step_avg:38.43ms +[2025-09-05 19:18:11] [Rank 0] step:6681/10000 train_time:256656ms step_avg:38.42ms +[2025-09-05 19:18:11] [Rank 0] step:6681/10000 train_time:256656ms step_avg:38.42ms +[2025-09-05 19:18:11] [Rank 0] step:6701/10000 train_time:257319ms step_avg:38.40ms +[2025-09-05 19:18:11] [Rank 0] step:6701/10000 train_time:257319ms step_avg:38.40ms +[2025-09-05 19:18:12] [Rank 0] step:6721/10000 train_time:257982ms step_avg:38.38ms +[2025-09-05 19:18:12] [Rank 0] step:6721/10000 train_time:257982ms step_avg:38.38ms +[2025-09-05 19:18:13] [Rank 0] step:6741/10000 train_time:258645ms step_avg:38.37ms +[2025-09-05 19:18:13] [Rank 0] step:6741/10000 train_time:258645ms step_avg:38.37ms +[2025-09-05 19:18:13] [Rank 0] step:6761/10000 train_time:259308ms step_avg:38.35ms +[2025-09-05 19:18:13] [Rank 0] step:6761/10000 train_time:259308ms step_avg:38.35ms +[2025-09-05 19:18:14] [Rank 0] step:6781/10000 train_time:259971ms step_avg:38.34ms +[2025-09-05 19:18:14] [Rank 0] step:6781/10000 train_time:259971ms step_avg:38.34ms +[2025-09-05 19:18:15] [Rank 0] step:6801/10000 train_time:260634ms step_avg:38.32ms +[2025-09-05 19:18:15] [Rank 0] step:6801/10000 train_time:260634ms step_avg:38.32ms +[2025-09-05 19:18:15] [Rank 0] step:6821/10000 train_time:261297ms step_avg:38.31ms +[2025-09-05 19:18:15] [Rank 0] step:6821/10000 train_time:261297ms step_avg:38.31ms +[2025-09-05 19:18:16] [Rank 0] step:6841/10000 train_time:261964ms step_avg:38.29ms +[2025-09-05 19:18:16] [Rank 0] step:6841/10000 train_time:261964ms step_avg:38.29ms +[2025-09-05 19:18:17] [Rank 0] step:6861/10000 train_time:262627ms step_avg:38.28ms +[2025-09-05 19:18:17] [Rank 0] step:6861/10000 train_time:262627ms step_avg:38.28ms +[2025-09-05 19:18:17] [Rank 0] step:6881/10000 train_time:263291ms step_avg:38.26ms +[2025-09-05 19:18:17] [Rank 0] step:6881/10000 train_time:263291ms step_avg:38.26ms +[2025-09-05 19:18:18] [Rank 0] step:6901/10000 train_time:263955ms step_avg:38.25ms +[2025-09-05 19:18:18] [Rank 0] step:6901/10000 train_time:263955ms step_avg:38.25ms +[2025-09-05 19:18:19] [Rank 0] step:6921/10000 train_time:264619ms step_avg:38.23ms +[2025-09-05 19:18:19] [Rank 0] step:6921/10000 train_time:264619ms step_avg:38.23ms +[2025-09-05 19:18:19] [Rank 0] step:6941/10000 train_time:265283ms step_avg:38.22ms +[2025-09-05 19:18:19] [Rank 0] step:6941/10000 train_time:265283ms step_avg:38.22ms +[2025-09-05 19:18:20] [Rank 0] step:6961/10000 train_time:265947ms step_avg:38.21ms +[2025-09-05 19:18:20] [Rank 0] step:6961/10000 train_time:265947ms step_avg:38.21ms +[2025-09-05 19:18:21] [Rank 0] step:6981/10000 train_time:266611ms step_avg:38.19ms +[2025-09-05 19:18:21] [Rank 0] step:6981/10000 train_time:266611ms step_avg:38.19ms +[2025-09-05 19:18:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:18:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:18:22] [Rank 0] PRINT: step:7000/10000 train_loss:0.6766 val_loss:0.6655 train_time:267510ms step_avg:38.22ms +[2025-09-05 19:18:22] [Rank 0] PRINT: step:7000/10000 train_loss:0.6766 val_loss:0.6655 train_time:267510ms step_avg:38.22ms +[2025-09-05 19:18:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:18:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:18:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:18:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:19:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:19:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:19:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:19:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:19:43] [Rank 0] Total Loss: 5.0329 +[2025-09-05 19:19:43] [Rank 0] Total Loss: 5.0329 +[2025-09-05 19:19:43] [Rank 0] Total FTA (Unweighted): 0.8719 +[2025-09-05 19:19:43] [Rank 0] Total FTA (Unweighted): 0.8719 +[2025-09-05 19:19:43] [Rank 0] Total FTA (Weighted): 0.8719 +[2025-09-05 19:19:43] [Rank 0] Total FTA (Weighted): 0.8719 +[2025-09-05 19:19:43] [Rank 0] Group 0 Loss: 5.2407 +[2025-09-05 19:19:43] [Rank 0] Group 0 Loss: 5.2407 +[2025-09-05 19:19:43] [Rank 0] Group 1 Loss: 4.7731 +[2025-09-05 19:19:43] [Rank 0] Group 1 Loss: 4.7731 +[2025-09-05 19:19:43] [Rank 0] Group 2 Loss: 4.4686 +[2025-09-05 19:19:43] [Rank 0] Group 2 Loss: 4.4686 +[2025-09-05 19:19:43] [Rank 0] Group 3 Loss: 4.9838 +[2025-09-05 19:19:43] [Rank 0] Group 3 Loss: 4.9838 +[2025-09-05 19:19:43] [Rank 0] Group 4 Loss: 4.8751 +[2025-09-05 19:19:43] [Rank 0] Group 4 Loss: 4.8751 +[2025-09-05 19:19:43] [Rank 0] Group 5 Loss: 5.0163 +[2025-09-05 19:19:43] [Rank 0] Group 5 Loss: 5.0163 +[2025-09-05 19:19:43] [Rank 0] Group 6 Loss: 4.8638 +[2025-09-05 19:19:43] [Rank 0] Group 6 Loss: 4.8638 +[2025-09-05 19:19:43] [Rank 0] Group 7 Loss: 4.9589 +[2025-09-05 19:19:43] [Rank 0] Group 7 Loss: 4.9589 +[2025-09-05 19:19:43] [Rank 0] Group 8 Loss: 5.1238 +[2025-09-05 19:19:43] [Rank 0] Group 8 Loss: 5.1238 +[2025-09-05 19:19:43] [Rank 0] Group 9 Loss: 5.0699 +[2025-09-05 19:19:43] [Rank 0] Group 9 Loss: 5.0699 +[2025-09-05 19:19:43] [Rank 0] Group 10 Loss: 5.1345 +[2025-09-05 19:19:43] [Rank 0] Group 10 Loss: 5.1345 +[2025-09-05 19:19:43] [Rank 0] Group 11 Loss: 5.1489 +[2025-09-05 19:19:43] [Rank 0] Group 11 Loss: 5.1489 +[2025-09-05 19:19:43] [Rank 0] Group 12 Loss: 5.1142 +[2025-09-05 19:19:43] [Rank 0] Group 12 Loss: 5.1142 +[2025-09-05 19:19:43] [Rank 0] Group 13 Loss: 5.2587 +[2025-09-05 19:19:43] [Rank 0] Group 13 Loss: 5.2587 +[2025-09-05 19:19:43] [Rank 0] Group 14 Loss: 5.1935 +[2025-09-05 19:19:43] [Rank 0] Group 14 Loss: 5.1935 +[2025-09-05 19:19:43] [Rank 0] Group 15 Loss: 5.3023 +[2025-09-05 19:19:43] [Rank 0] Group 15 Loss: 5.3023 +[2025-09-05 19:19:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:19:43] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:19:43] [Rank 0] Group 12 FTA: 0.9900 +[2025-09-05 19:19:43] [Rank 0] Group 12 FTA: 0.9900 +[2025-09-05 19:19:43] [Rank 0] Group 13 FTA: 0.5900 +[2025-09-05 19:19:43] [Rank 0] Group 13 FTA: 0.5900 +[2025-09-05 19:19:43] [Rank 0] Group 14 FTA: 0.2200 +[2025-09-05 19:19:43] [Rank 0] Group 14 FTA: 0.2200 +[2025-09-05 19:19:43] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-05 19:19:43] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-05 19:19:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:19:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:19:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:19:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:19:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:19:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:19:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:19:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:19:45] [Rank 0] step:7001/10000 train_time:267518ms step_avg:38.21ms +[2025-09-05 19:19:45] [Rank 0] step:7001/10000 train_time:267518ms step_avg:38.21ms +[2025-09-05 19:19:45] [Rank 0] step:7021/10000 train_time:267970ms step_avg:38.17ms +[2025-09-05 19:19:45] [Rank 0] step:7021/10000 train_time:267970ms step_avg:38.17ms +[2025-09-05 19:19:46] [Rank 0] step:7041/10000 train_time:268634ms step_avg:38.15ms +[2025-09-05 19:19:46] [Rank 0] step:7041/10000 train_time:268634ms step_avg:38.15ms +[2025-09-05 19:19:47] [Rank 0] step:7061/10000 train_time:269299ms step_avg:38.14ms +[2025-09-05 19:19:47] [Rank 0] step:7061/10000 train_time:269299ms step_avg:38.14ms +[2025-09-05 19:19:47] [Rank 0] step:7081/10000 train_time:269965ms step_avg:38.13ms +[2025-09-05 19:19:47] [Rank 0] step:7081/10000 train_time:269965ms step_avg:38.13ms +[2025-09-05 19:19:48] [Rank 0] step:7101/10000 train_time:270630ms step_avg:38.11ms +[2025-09-05 
19:19:48] [Rank 0] step:7101/10000 train_time:270630ms step_avg:38.11ms +[2025-09-05 19:19:49] [Rank 0] step:7121/10000 train_time:271295ms step_avg:38.10ms +[2025-09-05 19:19:49] [Rank 0] step:7121/10000 train_time:271295ms step_avg:38.10ms +[2025-09-05 19:19:49] [Rank 0] step:7141/10000 train_time:271959ms step_avg:38.08ms +[2025-09-05 19:19:49] [Rank 0] step:7141/10000 train_time:271959ms step_avg:38.08ms +[2025-09-05 19:19:50] [Rank 0] step:7161/10000 train_time:272624ms step_avg:38.07ms +[2025-09-05 19:19:50] [Rank 0] step:7161/10000 train_time:272624ms step_avg:38.07ms +[2025-09-05 19:19:51] [Rank 0] step:7181/10000 train_time:273289ms step_avg:38.06ms +[2025-09-05 19:19:51] [Rank 0] step:7181/10000 train_time:273289ms step_avg:38.06ms +[2025-09-05 19:19:51] [Rank 0] step:7201/10000 train_time:273953ms step_avg:38.04ms +[2025-09-05 19:19:51] [Rank 0] step:7201/10000 train_time:273953ms step_avg:38.04ms +[2025-09-05 19:19:52] [Rank 0] step:7221/10000 train_time:274619ms step_avg:38.03ms +[2025-09-05 19:19:52] [Rank 0] step:7221/10000 train_time:274619ms step_avg:38.03ms +[2025-09-05 19:19:53] [Rank 0] step:7241/10000 train_time:275284ms step_avg:38.02ms +[2025-09-05 19:19:53] [Rank 0] step:7241/10000 train_time:275284ms step_avg:38.02ms +[2025-09-05 19:19:53] [Rank 0] step:7261/10000 train_time:275948ms step_avg:38.00ms +[2025-09-05 19:19:53] [Rank 0] step:7261/10000 train_time:275948ms step_avg:38.00ms +[2025-09-05 19:19:54] [Rank 0] step:7281/10000 train_time:276613ms step_avg:37.99ms +[2025-09-05 19:19:54] [Rank 0] step:7281/10000 train_time:276613ms step_avg:37.99ms +[2025-09-05 19:19:55] [Rank 0] step:7301/10000 train_time:277278ms step_avg:37.98ms +[2025-09-05 19:19:55] [Rank 0] step:7301/10000 train_time:277278ms step_avg:37.98ms +[2025-09-05 19:19:55] [Rank 0] step:7321/10000 train_time:277941ms step_avg:37.96ms +[2025-09-05 19:19:55] [Rank 0] step:7321/10000 train_time:277941ms step_avg:37.96ms +[2025-09-05 19:19:56] [Rank 0] step:7341/10000 train_time:278606ms step_avg:37.95ms +[2025-09-05 19:19:56] [Rank 0] step:7341/10000 train_time:278606ms step_avg:37.95ms +[2025-09-05 19:19:57] [Rank 0] step:7361/10000 train_time:279272ms step_avg:37.94ms +[2025-09-05 19:19:57] [Rank 0] step:7361/10000 train_time:279272ms step_avg:37.94ms +[2025-09-05 19:19:58] [Rank 0] step:7381/10000 train_time:280068ms step_avg:37.94ms +[2025-09-05 19:19:58] [Rank 0] step:7381/10000 train_time:280068ms step_avg:37.94ms +[2025-09-05 19:19:58] [Rank 0] step:7401/10000 train_time:280836ms step_avg:37.95ms +[2025-09-05 19:19:58] [Rank 0] step:7401/10000 train_time:280836ms step_avg:37.95ms +[2025-09-05 19:19:59] [Rank 0] step:7421/10000 train_time:281501ms step_avg:37.93ms +[2025-09-05 19:19:59] [Rank 0] step:7421/10000 train_time:281501ms step_avg:37.93ms +[2025-09-05 19:20:00] [Rank 0] step:7441/10000 train_time:282166ms step_avg:37.92ms +[2025-09-05 19:20:00] [Rank 0] step:7441/10000 train_time:282166ms step_avg:37.92ms +[2025-09-05 19:20:00] [Rank 0] step:7461/10000 train_time:282995ms step_avg:37.93ms +[2025-09-05 19:20:00] [Rank 0] step:7461/10000 train_time:282995ms step_avg:37.93ms +[2025-09-05 19:20:01] [Rank 0] step:7481/10000 train_time:283659ms step_avg:37.92ms +[2025-09-05 19:20:01] [Rank 0] step:7481/10000 train_time:283659ms step_avg:37.92ms +[2025-09-05 19:20:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
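The divisibility warning repeated above is easy to quantify: with val_tokens = 491520 and val_batch_size = 65536, a validation loop that only consumes full batches covers 7 batches and skips the remainder. A minimal sketch of that arithmetic (variable names are illustrative, not taken from the training script):

```python
# Arithmetic behind the "not perfectly divisible" warning, assuming the
# validation loop consumes full batches only (an assumption; the loader
# itself is not shown in this diff).
val_tokens = 491_520
val_batch_size = 65_536

full_batches = val_tokens // val_batch_size   # 7
consumed = full_batches * val_batch_size      # 458_752
missed = val_tokens - consumed                # 32_768 tokens skipped

print(f"{full_batches} full batches; {missed} of {val_tokens} tokens "
      f"missed ({missed / val_tokens:.1%})")  # -> 7 full batches; 6.7%
```

Rounding val_tokens down to 458752 or up to 524288 (multiples of 65536) would silence the warning without touching the loader.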
+[2025-09-05 19:20:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:20:02] [Rank 0] PRINT: step:7500/10000 train_loss:0.6698 val_loss:0.6600 train_time:284561ms step_avg:37.94ms +[2025-09-05 19:20:02] [Rank 0] PRINT: step:7500/10000 train_loss:0.6698 val_loss:0.6600 train_time:284561ms step_avg:37.94ms +[2025-09-05 19:20:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:20:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:20:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:20:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:21:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:21:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:21:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:21:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:21:24] [Rank 0] Total Loss: 5.0711 +[2025-09-05 19:21:24] [Rank 0] Total Loss: 5.0711 +[2025-09-05 19:21:24] [Rank 0] Total FTA (Unweighted): 0.8763 +[2025-09-05 19:21:24] [Rank 0] Total FTA (Unweighted): 0.8763 +[2025-09-05 19:21:24] [Rank 0] Total FTA (Weighted): 0.8762 +[2025-09-05 19:21:24] [Rank 0] Total FTA (Weighted): 0.8762 +[2025-09-05 19:21:24] [Rank 0] Group 0 Loss: 5.3130 +[2025-09-05 19:21:24] [Rank 0] Group 0 Loss: 5.3130 +[2025-09-05 19:21:24] [Rank 0] Group 1 Loss: 4.7072 +[2025-09-05 19:21:24] [Rank 0] Group 1 Loss: 4.7072 +[2025-09-05 19:21:24] [Rank 0] Group 2 Loss: 4.5109 +[2025-09-05 19:21:24] [Rank 0] Group 2 Loss: 4.5109 +[2025-09-05 19:21:24] [Rank 0] Group 3 Loss: 4.9964 +[2025-09-05 19:21:24] [Rank 0] Group 3 Loss: 4.9964 +[2025-09-05 19:21:24] [Rank 0] Group 4 Loss: 4.9074 +[2025-09-05 19:21:24] [Rank 0] Group 4 Loss: 4.9074 +[2025-09-05 19:21:24] [Rank 0] Group 5 Loss: 5.0375 +[2025-09-05 19:21:24] [Rank 0] Group 5 Loss: 5.0375 +[2025-09-05 19:21:24] [Rank 0] Group 6 Loss: 4.8769 +[2025-09-05 19:21:24] [Rank 0] Group 6 Loss: 4.8769 +[2025-09-05 19:21:24] [Rank 0] Group 7 Loss: 5.0082 +[2025-09-05 19:21:24] [Rank 0] Group 7 Loss: 5.0082 +[2025-09-05 19:21:24] [Rank 0] Group 8 Loss: 5.1728 +[2025-09-05 19:21:24] [Rank 0] Group 8 Loss: 5.1728 +[2025-09-05 19:21:24] [Rank 0] Group 9 Loss: 5.1122 +[2025-09-05 19:21:24] [Rank 0] Group 9 Loss: 5.1122 +[2025-09-05 19:21:24] [Rank 0] Group 10 Loss: 5.2311 +[2025-09-05 19:21:24] [Rank 0] Group 10 Loss: 5.2311 +[2025-09-05 19:21:24] [Rank 0] Group 11 Loss: 5.2099 +[2025-09-05 19:21:24] [Rank 0] Group 11 Loss: 5.2099 +[2025-09-05 19:21:24] [Rank 0] Group 12 Loss: 5.1652 +[2025-09-05 19:21:24] [Rank 0] Group 12 Loss: 5.1652 +[2025-09-05 19:21:24] [Rank 0] Group 13 Loss: 5.3037 +[2025-09-05 19:21:24] [Rank 0] Group 13 Loss: 5.3037 +[2025-09-05 19:21:24] [Rank 0] Group 14 Loss: 5.2512 +[2025-09-05 19:21:24] [Rank 0] Group 14 Loss: 5.2512 +[2025-09-05 19:21:24] [Rank 0] Group 15 Loss: 5.3343 +[2025-09-05 19:21:24] [Rank 0] Group 15 Loss: 5.3343 +[2025-09-05 19:21:24] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:21:24] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:21:24] [Rank 0] Group 12 FTA: 0.9900 +[2025-09-05 19:21:24] [Rank 0] Group 12 FTA: 0.9900 +[2025-09-05 19:21:24] [Rank 0] Group 13 FTA: 0.6500 +[2025-09-05 19:21:24] [Rank 0] Group 13 FTA: 0.6500 +[2025-09-05 19:21:24] [Rank 0] Group 14 FTA: 0.2500 +[2025-09-05 19:21:24] [Rank 0] Group 14 FTA: 0.2500 +[2025-09-05 19:21:24] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 19:21:24] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 19:21:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:21:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:21:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:21:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:21:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:21:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:21:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:21:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:21:25] [Rank 0] step:7501/10000 train_time:284568ms step_avg:37.94ms +[2025-09-05 19:21:25] [Rank 0] step:7501/10000 train_time:284568ms step_avg:37.94ms +[2025-09-05 19:21:26] [Rank 0] step:7521/10000 train_time:285015ms step_avg:37.90ms +[2025-09-05 19:21:26] [Rank 0] step:7521/10000 train_time:285015ms step_avg:37.90ms +[2025-09-05 19:21:27] [Rank 0] step:7541/10000 train_time:285678ms step_avg:37.88ms +[2025-09-05 19:21:27] [Rank 0] step:7541/10000 train_time:285678ms step_avg:37.88ms +[2025-09-05 19:21:27] [Rank 0] step:7561/10000 train_time:286341ms step_avg:37.87ms +[2025-09-05 19:21:27] [Rank 0] step:7561/10000 train_time:286341ms step_avg:37.87ms +[2025-09-05 19:21:28] [Rank 0] step:7581/10000 train_time:287004ms step_avg:37.86ms +[2025-09-05 19:21:28] [Rank 0] step:7581/10000 train_time:287004ms step_avg:37.86ms +[2025-09-05 19:21:29] [Rank 0] step:7601/10000 train_time:287668ms step_avg:37.85ms +[2025-09-05 
19:21:29] [Rank 0] step:7601/10000 train_time:287668ms step_avg:37.85ms +[2025-09-05 19:21:29] [Rank 0] step:7621/10000 train_time:288330ms step_avg:37.83ms +[2025-09-05 19:21:29] [Rank 0] step:7621/10000 train_time:288330ms step_avg:37.83ms +[2025-09-05 19:21:30] [Rank 0] step:7641/10000 train_time:289648ms step_avg:37.91ms +[2025-09-05 19:21:30] [Rank 0] step:7641/10000 train_time:289648ms step_avg:37.91ms +[2025-09-05 19:21:31] [Rank 0] step:7661/10000 train_time:290124ms step_avg:37.87ms +[2025-09-05 19:21:31] [Rank 0] step:7661/10000 train_time:290124ms step_avg:37.87ms +[2025-09-05 19:21:32] [Rank 0] step:7681/10000 train_time:290787ms step_avg:37.86ms +[2025-09-05 19:21:32] [Rank 0] step:7681/10000 train_time:290787ms step_avg:37.86ms +[2025-09-05 19:21:32] [Rank 0] step:7701/10000 train_time:291450ms step_avg:37.85ms +[2025-09-05 19:21:32] [Rank 0] step:7701/10000 train_time:291450ms step_avg:37.85ms +[2025-09-05 19:21:33] [Rank 0] step:7721/10000 train_time:292113ms step_avg:37.83ms +[2025-09-05 19:21:33] [Rank 0] step:7721/10000 train_time:292113ms step_avg:37.83ms +[2025-09-05 19:21:34] [Rank 0] step:7741/10000 train_time:292776ms step_avg:37.82ms +[2025-09-05 19:21:34] [Rank 0] step:7741/10000 train_time:292776ms step_avg:37.82ms +[2025-09-05 19:21:34] [Rank 0] step:7761/10000 train_time:293438ms step_avg:37.81ms +[2025-09-05 19:21:34] [Rank 0] step:7761/10000 train_time:293438ms step_avg:37.81ms +[2025-09-05 19:21:35] [Rank 0] step:7781/10000 train_time:294101ms step_avg:37.80ms +[2025-09-05 19:21:35] [Rank 0] step:7781/10000 train_time:294101ms step_avg:37.80ms +[2025-09-05 19:21:36] [Rank 0] step:7801/10000 train_time:294764ms step_avg:37.79ms +[2025-09-05 19:21:36] [Rank 0] step:7801/10000 train_time:294764ms step_avg:37.79ms +[2025-09-05 19:21:36] [Rank 0] step:7821/10000 train_time:295427ms step_avg:37.77ms +[2025-09-05 19:21:36] [Rank 0] step:7821/10000 train_time:295427ms step_avg:37.77ms +[2025-09-05 19:21:37] [Rank 0] step:7841/10000 train_time:296090ms step_avg:37.76ms +[2025-09-05 19:21:37] [Rank 0] step:7841/10000 train_time:296090ms step_avg:37.76ms +[2025-09-05 19:21:38] [Rank 0] step:7861/10000 train_time:296753ms step_avg:37.75ms +[2025-09-05 19:21:38] [Rank 0] step:7861/10000 train_time:296753ms step_avg:37.75ms +[2025-09-05 19:21:38] [Rank 0] step:7881/10000 train_time:297417ms step_avg:37.74ms +[2025-09-05 19:21:38] [Rank 0] step:7881/10000 train_time:297417ms step_avg:37.74ms +[2025-09-05 19:21:39] [Rank 0] step:7901/10000 train_time:298082ms step_avg:37.73ms +[2025-09-05 19:21:39] [Rank 0] step:7901/10000 train_time:298082ms step_avg:37.73ms +[2025-09-05 19:21:40] [Rank 0] step:7921/10000 train_time:298745ms step_avg:37.72ms +[2025-09-05 19:21:40] [Rank 0] step:7921/10000 train_time:298745ms step_avg:37.72ms +[2025-09-05 19:21:40] [Rank 0] step:7941/10000 train_time:299407ms step_avg:37.70ms +[2025-09-05 19:21:40] [Rank 0] step:7941/10000 train_time:299407ms step_avg:37.70ms +[2025-09-05 19:21:41] [Rank 0] step:7961/10000 train_time:300070ms step_avg:37.69ms +[2025-09-05 19:21:41] [Rank 0] step:7961/10000 train_time:300070ms step_avg:37.69ms +[2025-09-05 19:21:42] [Rank 0] step:7981/10000 train_time:300733ms step_avg:37.68ms +[2025-09-05 19:21:42] [Rank 0] step:7981/10000 train_time:300733ms step_avg:37.68ms +[2025-09-05 19:21:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
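The two FTA totals printed at each detailed eval ("Unweighted" vs "Weighted") agree to within rounding, which is what one would expect if the 1600 fixed-eval samples are split evenly across the 16 groups (100 each, matching the per_group_k value in the configs committed in this diff). The eval script is not part of this hunk, so the following aggregation is an assumption about what the two totals mean:

```python
# Presumed aggregation behind the two FTA totals (assumption, not source):
# "unweighted" averages per-group accuracies; "weighted" pools samples.
def fta_totals(correct: dict[int, int], count: dict[int, int]) -> tuple[float, float]:
    accs = {g: correct[g] / count[g] for g in count}
    unweighted = sum(accs.values()) / len(accs)
    weighted = sum(correct.values()) / sum(count.values())
    return unweighted, weighted

# With equal group sizes (16 groups x 100 samples) the two definitions
# coincide exactly, consistent with the logged 0.8763 vs 0.8762.
```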
+[2025-09-05 19:21:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:21:43] [Rank 0] PRINT: step:8000/10000 train_loss:0.6638 val_loss:0.6533 train_time:301632ms step_avg:37.70ms +[2025-09-05 19:21:43] [Rank 0] PRINT: step:8000/10000 train_loss:0.6638 val_loss:0.6533 train_time:301632ms step_avg:37.70ms +[2025-09-05 19:21:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:21:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:21:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:21:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:23:04] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:23:04] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:23:04] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:23:04] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:23:04] [Rank 0] Total Loss: 5.0468 +[2025-09-05 19:23:04] [Rank 0] Total Loss: 5.0468 +[2025-09-05 19:23:04] [Rank 0] Total FTA (Unweighted): 0.8956 +[2025-09-05 19:23:04] [Rank 0] Total FTA (Unweighted): 0.8956 +[2025-09-05 19:23:04] [Rank 0] Total FTA (Weighted): 0.8956 +[2025-09-05 19:23:04] [Rank 0] Total FTA (Weighted): 0.8956 +[2025-09-05 19:23:04] [Rank 0] Group 0 Loss: 5.2085 +[2025-09-05 19:23:04] [Rank 0] Group 0 Loss: 5.2085 +[2025-09-05 19:23:04] [Rank 0] Group 1 Loss: 4.5918 +[2025-09-05 19:23:04] [Rank 0] Group 1 Loss: 4.5918 +[2025-09-05 19:23:04] [Rank 0] Group 2 Loss: 4.5754 +[2025-09-05 19:23:04] [Rank 0] Group 2 Loss: 4.5754 +[2025-09-05 19:23:04] [Rank 0] Group 3 Loss: 4.9495 +[2025-09-05 19:23:04] [Rank 0] Group 3 Loss: 4.9495 +[2025-09-05 19:23:04] [Rank 0] Group 4 Loss: 4.8683 +[2025-09-05 19:23:04] [Rank 0] Group 4 Loss: 4.8683 +[2025-09-05 19:23:04] [Rank 0] Group 5 Loss: 5.0417 +[2025-09-05 19:23:04] [Rank 0] Group 5 Loss: 5.0417 +[2025-09-05 19:23:04] [Rank 0] Group 6 Loss: 4.8861 +[2025-09-05 19:23:04] [Rank 0] Group 6 Loss: 4.8861 +[2025-09-05 19:23:04] [Rank 0] Group 7 Loss: 4.9829 +[2025-09-05 19:23:04] [Rank 0] Group 7 Loss: 4.9829 +[2025-09-05 19:23:04] [Rank 0] Group 8 Loss: 5.1636 +[2025-09-05 19:23:04] [Rank 0] Group 8 Loss: 5.1636 +[2025-09-05 19:23:04] [Rank 0] Group 9 Loss: 5.0761 +[2025-09-05 19:23:04] [Rank 0] Group 9 Loss: 5.0761 +[2025-09-05 19:23:04] [Rank 0] Group 10 Loss: 5.2001 +[2025-09-05 19:23:04] [Rank 0] Group 10 Loss: 5.2001 +[2025-09-05 19:23:04] [Rank 0] Group 11 Loss: 5.2035 +[2025-09-05 19:23:04] [Rank 0] Group 11 Loss: 5.2035 +[2025-09-05 19:23:04] [Rank 0] Group 12 Loss: 5.1561 +[2025-09-05 19:23:04] [Rank 0] Group 12 Loss: 5.1561 +[2025-09-05 19:23:04] [Rank 0] Group 13 Loss: 5.2460 +[2025-09-05 19:23:04] [Rank 0] Group 13 Loss: 5.2460 +[2025-09-05 19:23:04] [Rank 0] Group 14 Loss: 5.2707 +[2025-09-05 19:23:04] [Rank 0] Group 14 Loss: 5.2707 +[2025-09-05 19:23:04] [Rank 0] Group 15 Loss: 5.3278 +[2025-09-05 19:23:04] [Rank 0] Group 15 Loss: 5.3278 +[2025-09-05 19:23:04] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:23:04] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:23:04] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:23:05] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:23:05] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:23:05] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:23:05] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:23:05] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-05 19:23:05] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-05 19:23:05] [Rank 0] Group 13 FTA: 0.7700 +[2025-09-05 19:23:05] [Rank 0] Group 13 FTA: 0.7700 +[2025-09-05 19:23:05] [Rank 0] Group 14 FTA: 0.3500 +[2025-09-05 19:23:05] [Rank 0] Group 14 FTA: 0.3500 +[2025-09-05 19:23:05] [Rank 0] Group 15 FTA: 0.2100 +[2025-09-05 19:23:05] [Rank 0] Group 15 FTA: 0.2100 +[2025-09-05 19:23:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:23:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:23:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:23:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:23:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:23:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:23:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:23:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:23:06] [Rank 0] step:8001/10000 train_time:301639ms step_avg:37.70ms +[2025-09-05 19:23:06] [Rank 0] step:8001/10000 train_time:301639ms step_avg:37.70ms +[2025-09-05 19:23:07] [Rank 0] step:8021/10000 train_time:302093ms step_avg:37.66ms +[2025-09-05 19:23:07] [Rank 0] step:8021/10000 train_time:302093ms step_avg:37.66ms +[2025-09-05 19:23:08] [Rank 0] step:8041/10000 train_time:303229ms step_avg:37.71ms +[2025-09-05 19:23:08] [Rank 0] step:8041/10000 train_time:303229ms step_avg:37.71ms +[2025-09-05 19:23:08] [Rank 0] step:8061/10000 train_time:303894ms step_avg:37.70ms +[2025-09-05 19:23:08] [Rank 0] step:8061/10000 train_time:303894ms step_avg:37.70ms +[2025-09-05 19:23:09] [Rank 0] step:8081/10000 train_time:304557ms step_avg:37.69ms +[2025-09-05 19:23:09] [Rank 0] step:8081/10000 train_time:304557ms step_avg:37.69ms +[2025-09-05 19:23:10] [Rank 0] step:8101/10000 train_time:305384ms step_avg:37.70ms +[2025-09-05 
19:23:10] [Rank 0] step:8101/10000 train_time:305384ms step_avg:37.70ms +[2025-09-05 19:23:11] [Rank 0] step:8121/10000 train_time:306049ms step_avg:37.69ms +[2025-09-05 19:23:11] [Rank 0] step:8121/10000 train_time:306049ms step_avg:37.69ms +[2025-09-05 19:23:11] [Rank 0] step:8141/10000 train_time:306713ms step_avg:37.68ms +[2025-09-05 19:23:11] [Rank 0] step:8141/10000 train_time:306713ms step_avg:37.68ms +[2025-09-05 19:23:12] [Rank 0] step:8161/10000 train_time:307377ms step_avg:37.66ms +[2025-09-05 19:23:12] [Rank 0] step:8161/10000 train_time:307377ms step_avg:37.66ms +[2025-09-05 19:23:13] [Rank 0] step:8181/10000 train_time:308041ms step_avg:37.65ms +[2025-09-05 19:23:13] [Rank 0] step:8181/10000 train_time:308041ms step_avg:37.65ms +[2025-09-05 19:23:13] [Rank 0] step:8201/10000 train_time:308705ms step_avg:37.64ms +[2025-09-05 19:23:13] [Rank 0] step:8201/10000 train_time:308705ms step_avg:37.64ms +[2025-09-05 19:23:14] [Rank 0] step:8221/10000 train_time:309369ms step_avg:37.63ms +[2025-09-05 19:23:14] [Rank 0] step:8221/10000 train_time:309369ms step_avg:37.63ms +[2025-09-05 19:23:15] [Rank 0] step:8241/10000 train_time:310033ms step_avg:37.62ms +[2025-09-05 19:23:15] [Rank 0] step:8241/10000 train_time:310033ms step_avg:37.62ms +[2025-09-05 19:23:15] [Rank 0] step:8261/10000 train_time:310698ms step_avg:37.61ms +[2025-09-05 19:23:15] [Rank 0] step:8261/10000 train_time:310698ms step_avg:37.61ms +[2025-09-05 19:23:16] [Rank 0] step:8281/10000 train_time:311364ms step_avg:37.60ms +[2025-09-05 19:23:16] [Rank 0] step:8281/10000 train_time:311364ms step_avg:37.60ms +[2025-09-05 19:23:17] [Rank 0] step:8301/10000 train_time:312026ms step_avg:37.59ms +[2025-09-05 19:23:17] [Rank 0] step:8301/10000 train_time:312026ms step_avg:37.59ms +[2025-09-05 19:23:17] [Rank 0] step:8321/10000 train_time:312690ms step_avg:37.58ms +[2025-09-05 19:23:17] [Rank 0] step:8321/10000 train_time:312690ms step_avg:37.58ms +[2025-09-05 19:23:18] [Rank 0] step:8341/10000 train_time:313355ms step_avg:37.57ms +[2025-09-05 19:23:18] [Rank 0] step:8341/10000 train_time:313355ms step_avg:37.57ms +[2025-09-05 19:23:19] [Rank 0] step:8361/10000 train_time:314020ms step_avg:37.56ms +[2025-09-05 19:23:19] [Rank 0] step:8361/10000 train_time:314020ms step_avg:37.56ms +[2025-09-05 19:23:19] [Rank 0] step:8381/10000 train_time:314685ms step_avg:37.55ms +[2025-09-05 19:23:19] [Rank 0] step:8381/10000 train_time:314685ms step_avg:37.55ms +[2025-09-05 19:23:20] [Rank 0] step:8401/10000 train_time:315350ms step_avg:37.54ms +[2025-09-05 19:23:20] [Rank 0] step:8401/10000 train_time:315350ms step_avg:37.54ms +[2025-09-05 19:23:21] [Rank 0] step:8421/10000 train_time:316015ms step_avg:37.53ms +[2025-09-05 19:23:21] [Rank 0] step:8421/10000 train_time:316015ms step_avg:37.53ms +[2025-09-05 19:23:21] [Rank 0] step:8441/10000 train_time:316679ms step_avg:37.52ms +[2025-09-05 19:23:21] [Rank 0] step:8441/10000 train_time:316679ms step_avg:37.52ms +[2025-09-05 19:23:22] [Rank 0] step:8461/10000 train_time:317344ms step_avg:37.51ms +[2025-09-05 19:23:22] [Rank 0] step:8461/10000 train_time:317344ms step_avg:37.51ms +[2025-09-05 19:23:23] [Rank 0] step:8481/10000 train_time:318009ms step_avg:37.50ms +[2025-09-05 19:23:23] [Rank 0] step:8481/10000 train_time:318009ms step_avg:37.50ms +[2025-09-05 19:23:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
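The step_avg figure on each progress line is consistent with cumulative train_time divided by completed steps; this is inferred from the logged numbers rather than read from the script:

```python
# Reproducing step_avg from a line above (step:8481, train_time:318009ms).
train_time_ms, step = 318_009, 8_481
print(f"step_avg: {train_time_ms / step:.2f}ms")  # -> 37.50ms, as logged
```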
+[2025-09-05 19:23:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:23:24] [Rank 0] PRINT: step:8500/10000 train_loss:0.6579 val_loss:0.6479 train_time:318910ms step_avg:37.52ms +[2025-09-05 19:23:24] [Rank 0] PRINT: step:8500/10000 train_loss:0.6579 val_loss:0.6479 train_time:318910ms step_avg:37.52ms +[2025-09-05 19:23:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:23:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:23:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:23:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:24:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:24:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:24:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:24:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:24:45] [Rank 0] Total Loss: 5.0846 +[2025-09-05 19:24:45] [Rank 0] Total Loss: 5.0846 +[2025-09-05 19:24:45] [Rank 0] Total FTA (Unweighted): 0.9113 +[2025-09-05 19:24:45] [Rank 0] Total FTA (Unweighted): 0.9113 +[2025-09-05 19:24:45] [Rank 0] Total FTA (Weighted): 0.9113 +[2025-09-05 19:24:45] [Rank 0] Total FTA (Weighted): 0.9113 +[2025-09-05 19:24:45] [Rank 0] Group 0 Loss: 5.3564 +[2025-09-05 19:24:45] [Rank 0] Group 0 Loss: 5.3564 +[2025-09-05 19:24:45] [Rank 0] Group 1 Loss: 4.6909 +[2025-09-05 19:24:45] [Rank 0] Group 1 Loss: 4.6909 +[2025-09-05 19:24:45] [Rank 0] Group 2 Loss: 4.6302 +[2025-09-05 19:24:45] [Rank 0] Group 2 Loss: 4.6302 +[2025-09-05 19:24:45] [Rank 0] Group 3 Loss: 4.9808 +[2025-09-05 19:24:45] [Rank 0] Group 3 Loss: 4.9808 +[2025-09-05 19:24:45] [Rank 0] Group 4 Loss: 4.8892 +[2025-09-05 19:24:45] [Rank 0] Group 4 Loss: 4.8892 +[2025-09-05 19:24:45] [Rank 0] Group 5 Loss: 5.0685 +[2025-09-05 19:24:45] [Rank 0] Group 5 Loss: 5.0685 +[2025-09-05 19:24:45] [Rank 0] Group 6 Loss: 4.9061 +[2025-09-05 19:24:45] [Rank 0] Group 6 Loss: 4.9061 +[2025-09-05 19:24:45] [Rank 0] Group 7 Loss: 5.0307 +[2025-09-05 19:24:45] [Rank 0] Group 7 Loss: 5.0307 +[2025-09-05 19:24:45] [Rank 0] Group 8 Loss: 5.2085 +[2025-09-05 19:24:45] [Rank 0] Group 8 Loss: 5.2085 +[2025-09-05 19:24:45] [Rank 0] Group 9 Loss: 5.1121 +[2025-09-05 19:24:45] [Rank 0] Group 9 Loss: 5.1121 +[2025-09-05 19:24:45] [Rank 0] Group 10 Loss: 5.2210 +[2025-09-05 19:24:45] [Rank 0] Group 10 Loss: 5.2210 +[2025-09-05 19:24:45] [Rank 0] Group 11 Loss: 5.1943 +[2025-09-05 19:24:45] [Rank 0] Group 11 Loss: 5.1943 +[2025-09-05 19:24:45] [Rank 0] Group 12 Loss: 5.2143 +[2025-09-05 19:24:45] [Rank 0] Group 12 Loss: 5.2143 +[2025-09-05 19:24:45] [Rank 0] Group 13 Loss: 5.2867 +[2025-09-05 19:24:45] [Rank 0] Group 13 Loss: 5.2867 +[2025-09-05 19:24:45] [Rank 0] Group 14 Loss: 5.2502 +[2025-09-05 19:24:45] [Rank 0] Group 14 Loss: 5.2502 +[2025-09-05 19:24:45] [Rank 0] Group 15 Loss: 5.3133 +[2025-09-05 19:24:45] [Rank 0] Group 15 Loss: 5.3133 +[2025-09-05 19:24:45] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:24:45] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:24:45] [Rank 0] Group 12 FTA: 0.9900 +[2025-09-05 19:24:45] [Rank 0] Group 12 FTA: 0.9900 +[2025-09-05 19:24:45] [Rank 0] Group 13 FTA: 0.9100 +[2025-09-05 19:24:45] [Rank 0] Group 13 FTA: 0.9100 +[2025-09-05 19:24:45] [Rank 0] Group 14 FTA: 0.4400 +[2025-09-05 19:24:45] [Rank 0] Group 14 FTA: 0.4400 +[2025-09-05 19:24:45] [Rank 0] Group 15 FTA: 0.2400 +[2025-09-05 19:24:45] [Rank 0] Group 15 FTA: 0.2400 +[2025-09-05 19:24:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:24:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:24:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:24:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:24:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:24:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:24:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:24:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:24:47] [Rank 0] step:8501/10000 train_time:318917ms step_avg:37.52ms +[2025-09-05 19:24:47] [Rank 0] step:8501/10000 train_time:318917ms step_avg:37.52ms +[2025-09-05 19:24:47] [Rank 0] step:8521/10000 train_time:319357ms step_avg:37.48ms +[2025-09-05 19:24:47] [Rank 0] step:8521/10000 train_time:319357ms step_avg:37.48ms +[2025-09-05 19:24:48] [Rank 0] step:8541/10000 train_time:320018ms step_avg:37.47ms +[2025-09-05 19:24:48] [Rank 0] step:8541/10000 train_time:320018ms step_avg:37.47ms +[2025-09-05 19:24:49] [Rank 0] step:8561/10000 train_time:320682ms step_avg:37.46ms +[2025-09-05 19:24:49] [Rank 0] step:8561/10000 train_time:320682ms step_avg:37.46ms +[2025-09-05 19:24:49] [Rank 0] step:8581/10000 train_time:321346ms step_avg:37.45ms +[2025-09-05 19:24:49] [Rank 0] step:8581/10000 train_time:321346ms step_avg:37.45ms +[2025-09-05 19:24:50] [Rank 0] step:8601/10000 train_time:322010ms step_avg:37.44ms +[2025-09-05 
19:24:50] [Rank 0] step:8601/10000 train_time:322010ms step_avg:37.44ms +[2025-09-05 19:24:51] [Rank 0] step:8621/10000 train_time:322674ms step_avg:37.43ms +[2025-09-05 19:24:51] [Rank 0] step:8621/10000 train_time:322674ms step_avg:37.43ms +[2025-09-05 19:24:51] [Rank 0] step:8641/10000 train_time:323338ms step_avg:37.42ms +[2025-09-05 19:24:51] [Rank 0] step:8641/10000 train_time:323338ms step_avg:37.42ms +[2025-09-05 19:24:52] [Rank 0] step:8661/10000 train_time:324003ms step_avg:37.41ms +[2025-09-05 19:24:52] [Rank 0] step:8661/10000 train_time:324003ms step_avg:37.41ms +[2025-09-05 19:24:53] [Rank 0] step:8681/10000 train_time:324667ms step_avg:37.40ms +[2025-09-05 19:24:53] [Rank 0] step:8681/10000 train_time:324667ms step_avg:37.40ms +[2025-09-05 19:24:53] [Rank 0] step:8701/10000 train_time:325338ms step_avg:37.39ms +[2025-09-05 19:24:53] [Rank 0] step:8701/10000 train_time:325338ms step_avg:37.39ms +[2025-09-05 19:24:54] [Rank 0] step:8721/10000 train_time:326001ms step_avg:37.38ms +[2025-09-05 19:24:54] [Rank 0] step:8721/10000 train_time:326001ms step_avg:37.38ms +[2025-09-05 19:24:55] [Rank 0] step:8741/10000 train_time:326665ms step_avg:37.37ms +[2025-09-05 19:24:55] [Rank 0] step:8741/10000 train_time:326665ms step_avg:37.37ms +[2025-09-05 19:24:55] [Rank 0] step:8761/10000 train_time:327329ms step_avg:37.36ms +[2025-09-05 19:24:55] [Rank 0] step:8761/10000 train_time:327329ms step_avg:37.36ms +[2025-09-05 19:24:56] [Rank 0] step:8781/10000 train_time:327993ms step_avg:37.35ms +[2025-09-05 19:24:56] [Rank 0] step:8781/10000 train_time:327993ms step_avg:37.35ms +[2025-09-05 19:24:57] [Rank 0] step:8801/10000 train_time:328656ms step_avg:37.34ms +[2025-09-05 19:24:57] [Rank 0] step:8801/10000 train_time:328656ms step_avg:37.34ms +[2025-09-05 19:24:57] [Rank 0] step:8821/10000 train_time:329321ms step_avg:37.33ms +[2025-09-05 19:24:57] [Rank 0] step:8821/10000 train_time:329321ms step_avg:37.33ms +[2025-09-05 19:24:58] [Rank 0] step:8841/10000 train_time:330080ms step_avg:37.34ms +[2025-09-05 19:24:58] [Rank 0] step:8841/10000 train_time:330080ms step_avg:37.34ms +[2025-09-05 19:24:59] [Rank 0] step:8861/10000 train_time:330744ms step_avg:37.33ms +[2025-09-05 19:24:59] [Rank 0] step:8861/10000 train_time:330744ms step_avg:37.33ms +[2025-09-05 19:24:59] [Rank 0] step:8881/10000 train_time:331408ms step_avg:37.32ms +[2025-09-05 19:24:59] [Rank 0] step:8881/10000 train_time:331408ms step_avg:37.32ms +[2025-09-05 19:25:00] [Rank 0] step:8901/10000 train_time:332071ms step_avg:37.31ms +[2025-09-05 19:25:00] [Rank 0] step:8901/10000 train_time:332071ms step_avg:37.31ms +[2025-09-05 19:25:01] [Rank 0] step:8921/10000 train_time:332734ms step_avg:37.30ms +[2025-09-05 19:25:01] [Rank 0] step:8921/10000 train_time:332734ms step_avg:37.30ms +[2025-09-05 19:25:02] [Rank 0] step:8941/10000 train_time:333415ms step_avg:37.29ms +[2025-09-05 19:25:02] [Rank 0] step:8941/10000 train_time:333415ms step_avg:37.29ms +[2025-09-05 19:25:02] [Rank 0] step:8961/10000 train_time:334083ms step_avg:37.28ms +[2025-09-05 19:25:02] [Rank 0] step:8961/10000 train_time:334083ms step_avg:37.28ms +[2025-09-05 19:25:03] [Rank 0] step:8981/10000 train_time:334749ms step_avg:37.27ms +[2025-09-05 19:25:03] [Rank 0] step:8981/10000 train_time:334749ms step_avg:37.27ms +[2025-09-05 19:25:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
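Each eval block ends with the per-class loss/FTA curve PNGs being rewritten under the run directory. If only this text log survives, the same per-group series can be recovered by parsing the "Group N FTA" lines. A sketch, assuming one log entry per line as in the unwrapped log (the regexes and structure here are mine, not the training script's):

```python
import re
from collections import defaultdict

# Detailed-eval blocks follow a "step:<N>/10000 ... val_loss" line, so we
# track the most recent such step while scanning (illustrative parser).
step_re = re.compile(r"step:(\d+)/10000 .*val_loss")
fta_re = re.compile(r"Group (\d+) FTA: ([0-9.]+)")

def parse_fta(log_text: str) -> dict[int, list[tuple[int, float]]]:
    series: dict[int, list[tuple[int, float]]] = defaultdict(list)
    step = None
    for line in log_text.splitlines():
        if (m := step_re.search(line)):
            step = int(m.group(1))
        elif (m := fta_re.search(line)) and step is not None:
            g, pt = int(m.group(1)), (step, float(m.group(2)))
            if not series[g] or series[g][-1] != pt:  # tolerate doubled lines
                series[g].append(pt)
    return series
```

On this run the tail groups are the ones still moving: group 15's FTA climbs from 0.15 at step 7000 to 0.24 at step 8500, while groups 0-11 hold at 1.0000 throughout.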
+[2025-09-05 19:25:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:25:04] [Rank 0] PRINT: step:9000/10000 train_loss:0.6522 val_loss:0.6428 train_time:335649ms step_avg:37.29ms +[2025-09-05 19:25:04] [Rank 0] PRINT: step:9000/10000 train_loss:0.6522 val_loss:0.6428 train_time:335649ms step_avg:37.29ms +[2025-09-05 19:25:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:25:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:25:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:25:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:26:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:26:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:26:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:26:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:26:25] [Rank 0] Total Loss: 5.0811 +[2025-09-05 19:26:25] [Rank 0] Total Loss: 5.0811 +[2025-09-05 19:26:25] [Rank 0] Total FTA (Unweighted): 0.9256 +[2025-09-05 19:26:25] [Rank 0] Total FTA (Unweighted): 0.9256 +[2025-09-05 19:26:25] [Rank 0] Total FTA (Weighted): 0.9256 +[2025-09-05 19:26:25] [Rank 0] Total FTA (Weighted): 0.9256 +[2025-09-05 19:26:25] [Rank 0] Group 0 Loss: 5.4474 +[2025-09-05 19:26:25] [Rank 0] Group 0 Loss: 5.4474 +[2025-09-05 19:26:25] [Rank 0] Group 1 Loss: 4.5996 +[2025-09-05 19:26:25] [Rank 0] Group 1 Loss: 4.5996 +[2025-09-05 19:26:25] [Rank 0] Group 2 Loss: 4.6103 +[2025-09-05 19:26:25] [Rank 0] Group 2 Loss: 4.6103 +[2025-09-05 19:26:25] [Rank 0] Group 3 Loss: 4.9667 +[2025-09-05 19:26:25] [Rank 0] Group 3 Loss: 4.9667 +[2025-09-05 19:26:25] [Rank 0] Group 4 Loss: 4.8968 +[2025-09-05 19:26:25] [Rank 0] Group 4 Loss: 4.8968 +[2025-09-05 19:26:25] [Rank 0] Group 5 Loss: 5.0629 +[2025-09-05 19:26:25] [Rank 0] Group 5 Loss: 5.0629 +[2025-09-05 19:26:25] [Rank 0] Group 6 Loss: 4.9101 +[2025-09-05 19:26:25] [Rank 0] Group 6 Loss: 4.9101 +[2025-09-05 19:26:25] [Rank 0] Group 7 Loss: 5.0051 +[2025-09-05 19:26:25] [Rank 0] Group 7 Loss: 5.0051 +[2025-09-05 19:26:25] [Rank 0] Group 8 Loss: 5.1979 +[2025-09-05 19:26:25] [Rank 0] Group 8 Loss: 5.1979 +[2025-09-05 19:26:25] [Rank 0] Group 9 Loss: 5.1372 +[2025-09-05 19:26:25] [Rank 0] Group 9 Loss: 5.1372 +[2025-09-05 19:26:25] [Rank 0] Group 10 Loss: 5.2318 +[2025-09-05 19:26:25] [Rank 0] Group 10 Loss: 5.2318 +[2025-09-05 19:26:25] [Rank 0] Group 11 Loss: 5.2142 +[2025-09-05 19:26:25] [Rank 0] Group 11 Loss: 5.2142 +[2025-09-05 19:26:25] [Rank 0] Group 12 Loss: 5.1773 +[2025-09-05 19:26:25] [Rank 0] Group 12 Loss: 5.1773 +[2025-09-05 19:26:25] [Rank 0] Group 13 Loss: 5.3225 +[2025-09-05 19:26:25] [Rank 0] Group 13 Loss: 5.3225 +[2025-09-05 19:26:25] [Rank 0] Group 14 Loss: 5.2129 +[2025-09-05 19:26:25] [Rank 0] Group 14 Loss: 5.2129 +[2025-09-05 19:26:25] [Rank 0] Group 15 Loss: 5.3052 +[2025-09-05 19:26:25] [Rank 0] Group 15 Loss: 5.3052 +[2025-09-05 19:26:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:26:25] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-05 19:26:25] [Rank 0] Group 13 FTA: 0.9500 +[2025-09-05 19:26:25] [Rank 0] Group 13 FTA: 0.9500 +[2025-09-05 19:26:25] [Rank 0] Group 14 FTA: 0.5800 +[2025-09-05 19:26:25] [Rank 0] Group 14 FTA: 0.5800 +[2025-09-05 19:26:25] [Rank 0] Group 15 FTA: 0.2800 +[2025-09-05 19:26:25] [Rank 0] Group 15 FTA: 0.2800 +[2025-09-05 19:26:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:26:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:26:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:26:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:26:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:26:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:26:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:26:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:26:28] [Rank 0] step:9001/10000 train_time:335658ms step_avg:37.29ms +[2025-09-05 19:26:28] [Rank 0] step:9001/10000 train_time:335658ms step_avg:37.29ms +[2025-09-05 19:26:28] [Rank 0] step:9021/10000 train_time:336101ms step_avg:37.26ms +[2025-09-05 19:26:28] [Rank 0] step:9021/10000 train_time:336101ms step_avg:37.26ms +[2025-09-05 19:26:29] [Rank 0] step:9041/10000 train_time:336765ms step_avg:37.25ms +[2025-09-05 19:26:29] [Rank 0] step:9041/10000 train_time:336765ms step_avg:37.25ms +[2025-09-05 19:26:30] [Rank 0] step:9061/10000 train_time:337430ms step_avg:37.24ms +[2025-09-05 19:26:30] [Rank 0] step:9061/10000 train_time:337430ms step_avg:37.24ms +[2025-09-05 19:26:30] [Rank 0] step:9081/10000 train_time:338095ms step_avg:37.23ms +[2025-09-05 19:26:30] [Rank 0] step:9081/10000 train_time:338095ms step_avg:37.23ms +[2025-09-05 19:26:31] [Rank 0] step:9101/10000 train_time:338761ms step_avg:37.22ms +[2025-09-05 
19:26:31] [Rank 0] step:9101/10000 train_time:338761ms step_avg:37.22ms +[2025-09-05 19:26:32] [Rank 0] step:9121/10000 train_time:339423ms step_avg:37.21ms +[2025-09-05 19:26:32] [Rank 0] step:9121/10000 train_time:339423ms step_avg:37.21ms +[2025-09-05 19:26:32] [Rank 0] step:9141/10000 train_time:340088ms step_avg:37.20ms +[2025-09-05 19:26:32] [Rank 0] step:9141/10000 train_time:340088ms step_avg:37.20ms +[2025-09-05 19:26:33] [Rank 0] step:9161/10000 train_time:340753ms step_avg:37.20ms +[2025-09-05 19:26:33] [Rank 0] step:9161/10000 train_time:340753ms step_avg:37.20ms +[2025-09-05 19:26:34] [Rank 0] step:9181/10000 train_time:341418ms step_avg:37.19ms +[2025-09-05 19:26:34] [Rank 0] step:9181/10000 train_time:341418ms step_avg:37.19ms +[2025-09-05 19:26:34] [Rank 0] step:9201/10000 train_time:342082ms step_avg:37.18ms +[2025-09-05 19:26:34] [Rank 0] step:9201/10000 train_time:342082ms step_avg:37.18ms +[2025-09-05 19:26:35] [Rank 0] step:9221/10000 train_time:342747ms step_avg:37.17ms +[2025-09-05 19:26:35] [Rank 0] step:9221/10000 train_time:342747ms step_avg:37.17ms +[2025-09-05 19:26:36] [Rank 0] step:9241/10000 train_time:343412ms step_avg:37.16ms +[2025-09-05 19:26:36] [Rank 0] step:9241/10000 train_time:343412ms step_avg:37.16ms +[2025-09-05 19:26:36] [Rank 0] step:9261/10000 train_time:344077ms step_avg:37.15ms +[2025-09-05 19:26:36] [Rank 0] step:9261/10000 train_time:344077ms step_avg:37.15ms +[2025-09-05 19:26:37] [Rank 0] step:9281/10000 train_time:344743ms step_avg:37.14ms +[2025-09-05 19:26:37] [Rank 0] step:9281/10000 train_time:344743ms step_avg:37.14ms +[2025-09-05 19:26:38] [Rank 0] step:9301/10000 train_time:345409ms step_avg:37.14ms +[2025-09-05 19:26:38] [Rank 0] step:9301/10000 train_time:345409ms step_avg:37.14ms +[2025-09-05 19:26:38] [Rank 0] step:9321/10000 train_time:346073ms step_avg:37.13ms +[2025-09-05 19:26:38] [Rank 0] step:9321/10000 train_time:346073ms step_avg:37.13ms +[2025-09-05 19:26:39] [Rank 0] step:9341/10000 train_time:346739ms step_avg:37.12ms +[2025-09-05 19:26:39] [Rank 0] step:9341/10000 train_time:346739ms step_avg:37.12ms +[2025-09-05 19:26:40] [Rank 0] step:9361/10000 train_time:347404ms step_avg:37.11ms +[2025-09-05 19:26:40] [Rank 0] step:9361/10000 train_time:347404ms step_avg:37.11ms +[2025-09-05 19:26:40] [Rank 0] step:9381/10000 train_time:348068ms step_avg:37.10ms +[2025-09-05 19:26:40] [Rank 0] step:9381/10000 train_time:348068ms step_avg:37.10ms +[2025-09-05 19:26:41] [Rank 0] step:9401/10000 train_time:348733ms step_avg:37.10ms +[2025-09-05 19:26:41] [Rank 0] step:9401/10000 train_time:348733ms step_avg:37.10ms +[2025-09-05 19:26:42] [Rank 0] step:9421/10000 train_time:349399ms step_avg:37.09ms +[2025-09-05 19:26:42] [Rank 0] step:9421/10000 train_time:349399ms step_avg:37.09ms +[2025-09-05 19:26:42] [Rank 0] step:9441/10000 train_time:350065ms step_avg:37.08ms +[2025-09-05 19:26:42] [Rank 0] step:9441/10000 train_time:350065ms step_avg:37.08ms +[2025-09-05 19:26:43] [Rank 0] step:9461/10000 train_time:350730ms step_avg:37.07ms +[2025-09-05 19:26:43] [Rank 0] step:9461/10000 train_time:350730ms step_avg:37.07ms +[2025-09-05 19:26:44] [Rank 0] step:9481/10000 train_time:351396ms step_avg:37.06ms +[2025-09-05 19:26:44] [Rank 0] step:9481/10000 train_time:351396ms step_avg:37.06ms +[2025-09-05 19:26:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
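"Fixed-eval set loaded with 1600 samples" precedes every detailed eval: the indices come from the fixed_eval_indices.json file committed alongside each run's config, so every checkpoint is scored on the identical per-group samples and the loss/FTA curves stay comparable across steps and runs. A hedged loader sketch (the validation logic is illustrative; only the file format, a JSON map from group id to index list, is taken from this diff):

```python
import json

# Load a fixed-eval index file like the fixed_eval_indices.json in this
# diff: {"0": [idx, ...], "1": [...], ...}. Loader/checks are illustrative.
def load_fixed_eval(path: str) -> dict[int, list[int]]:
    with open(path) as f:
        groups = {int(g): idxs for g, idxs in json.load(f).items()}
    total = sum(len(v) for v in groups.values())
    assert total == 1600, f"expected 1600 fixed samples, got {total}"
    return groups

groups = load_fixed_eval("fixed_eval_indices.json")
# For this run: 16 groups x 100 indices each (per_group_k = 100).
```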
+[2025-09-05 19:26:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:26:45] [Rank 0] PRINT: step:9500/10000 train_loss:0.6468 val_loss:0.6383 train_time:352297ms step_avg:37.08ms +[2025-09-05 19:26:45] [Rank 0] PRINT: step:9500/10000 train_loss:0.6468 val_loss:0.6383 train_time:352297ms step_avg:37.08ms +[2025-09-05 19:26:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:26:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:26:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:26:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:28:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:28:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:28:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:28:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:28:06] [Rank 0] Total Loss: 5.1079 +[2025-09-05 19:28:06] [Rank 0] Total Loss: 5.1079 +[2025-09-05 19:28:06] [Rank 0] Total FTA (Unweighted): 0.9275 +[2025-09-05 19:28:06] [Rank 0] Total FTA (Unweighted): 0.9275 +[2025-09-05 19:28:06] [Rank 0] Total FTA (Weighted): 0.9275 +[2025-09-05 19:28:06] [Rank 0] Total FTA (Weighted): 0.9275 +[2025-09-05 19:28:06] [Rank 0] Group 0 Loss: 5.4161 +[2025-09-05 19:28:06] [Rank 0] Group 0 Loss: 5.4161 +[2025-09-05 19:28:06] [Rank 0] Group 1 Loss: 4.7377 +[2025-09-05 19:28:06] [Rank 0] Group 1 Loss: 4.7377 +[2025-09-05 19:28:06] [Rank 0] Group 2 Loss: 4.6258 +[2025-09-05 19:28:06] [Rank 0] Group 2 Loss: 4.6258 +[2025-09-05 19:28:06] [Rank 0] Group 3 Loss: 4.9722 +[2025-09-05 19:28:06] [Rank 0] Group 3 Loss: 4.9722 +[2025-09-05 19:28:06] [Rank 0] Group 4 Loss: 4.9257 +[2025-09-05 19:28:06] [Rank 0] Group 4 Loss: 4.9257 +[2025-09-05 19:28:06] [Rank 0] Group 5 Loss: 5.1127 +[2025-09-05 19:28:06] [Rank 0] Group 5 Loss: 5.1127 +[2025-09-05 19:28:06] [Rank 0] Group 6 Loss: 4.9341 +[2025-09-05 19:28:06] [Rank 0] Group 6 Loss: 4.9341 +[2025-09-05 19:28:06] [Rank 0] Group 7 Loss: 5.0335 +[2025-09-05 19:28:06] [Rank 0] Group 7 Loss: 5.0335 +[2025-09-05 19:28:06] [Rank 0] Group 8 Loss: 5.2086 +[2025-09-05 19:28:06] [Rank 0] Group 8 Loss: 5.2086 +[2025-09-05 19:28:06] [Rank 0] Group 9 Loss: 5.1572 +[2025-09-05 19:28:06] [Rank 0] Group 9 Loss: 5.1572 +[2025-09-05 19:28:06] [Rank 0] Group 10 Loss: 5.2865 +[2025-09-05 19:28:06] [Rank 0] Group 10 Loss: 5.2865 +[2025-09-05 19:28:06] [Rank 0] Group 11 Loss: 5.2527 +[2025-09-05 19:28:06] [Rank 0] Group 11 Loss: 5.2527 +[2025-09-05 19:28:06] [Rank 0] Group 12 Loss: 5.2149 +[2025-09-05 19:28:06] [Rank 0] Group 12 Loss: 5.2149 +[2025-09-05 19:28:06] [Rank 0] Group 13 Loss: 5.3071 +[2025-09-05 19:28:06] [Rank 0] Group 13 Loss: 5.3071 +[2025-09-05 19:28:06] [Rank 0] Group 14 Loss: 5.2532 +[2025-09-05 19:28:06] [Rank 0] Group 14 Loss: 5.2532 +[2025-09-05 19:28:06] [Rank 0] Group 15 Loss: 5.2889 +[2025-09-05 19:28:06] [Rank 0] Group 15 Loss: 5.2889 +[2025-09-05 19:28:06] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 19:28:06] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-05 19:28:06] [Rank 0] Group 13 FTA: 0.9500 +[2025-09-05 19:28:06] [Rank 0] Group 13 FTA: 0.9500 +[2025-09-05 19:28:06] [Rank 0] Group 14 FTA: 0.5800 +[2025-09-05 19:28:06] [Rank 0] Group 14 FTA: 0.5800 +[2025-09-05 19:28:06] [Rank 0] Group 15 FTA: 0.3100 +[2025-09-05 19:28:06] [Rank 0] Group 15 FTA: 0.3100 +[2025-09-05 19:28:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:28:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png +[2025-09-05 19:28:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:28:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png +[2025-09-05 19:28:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:28:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png +[2025-09-05 19:28:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:28:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png +[2025-09-05 19:28:08] [Rank 0] step:9501/10000 train_time:352304ms step_avg:37.08ms +[2025-09-05 19:28:08] [Rank 0] step:9501/10000 train_time:352304ms step_avg:37.08ms +[2025-09-05 19:28:08] [Rank 0] step:9521/10000 train_time:352750ms step_avg:37.05ms +[2025-09-05 19:28:08] [Rank 0] step:9521/10000 train_time:352750ms step_avg:37.05ms +[2025-09-05 19:28:09] [Rank 0] step:9541/10000 train_time:353412ms step_avg:37.04ms +[2025-09-05 19:28:09] [Rank 0] step:9541/10000 train_time:353412ms step_avg:37.04ms +[2025-09-05 19:28:10] [Rank 0] step:9561/10000 train_time:354074ms step_avg:37.03ms +[2025-09-05 19:28:10] [Rank 0] step:9561/10000 train_time:354074ms step_avg:37.03ms +[2025-09-05 19:28:10] [Rank 0] step:9581/10000 train_time:354737ms step_avg:37.03ms +[2025-09-05 19:28:10] [Rank 0] step:9581/10000 train_time:354737ms step_avg:37.03ms +[2025-09-05 19:28:11] [Rank 0] step:9601/10000 train_time:355399ms step_avg:37.02ms +[2025-09-05 
19:28:11] [Rank 0] step:9601/10000 train_time:355399ms step_avg:37.02ms +[2025-09-05 19:28:12] [Rank 0] step:9621/10000 train_time:356063ms step_avg:37.01ms +[2025-09-05 19:28:12] [Rank 0] step:9621/10000 train_time:356063ms step_avg:37.01ms +[2025-09-05 19:28:12] [Rank 0] step:9641/10000 train_time:356725ms step_avg:37.00ms +[2025-09-05 19:28:12] [Rank 0] step:9641/10000 train_time:356725ms step_avg:37.00ms +[2025-09-05 19:28:13] [Rank 0] step:9661/10000 train_time:357667ms step_avg:37.02ms +[2025-09-05 19:28:13] [Rank 0] step:9661/10000 train_time:357667ms step_avg:37.02ms +[2025-09-05 19:28:14] [Rank 0] step:9681/10000 train_time:358330ms step_avg:37.01ms +[2025-09-05 19:28:14] [Rank 0] step:9681/10000 train_time:358330ms step_avg:37.01ms +[2025-09-05 19:28:14] [Rank 0] step:9701/10000 train_time:358993ms step_avg:37.01ms +[2025-09-05 19:28:14] [Rank 0] step:9701/10000 train_time:358993ms step_avg:37.01ms +[2025-09-05 19:28:15] [Rank 0] step:9721/10000 train_time:359657ms step_avg:37.00ms +[2025-09-05 19:28:15] [Rank 0] step:9721/10000 train_time:359657ms step_avg:37.00ms +[2025-09-05 19:28:16] [Rank 0] step:9741/10000 train_time:360321ms step_avg:36.99ms +[2025-09-05 19:28:16] [Rank 0] step:9741/10000 train_time:360321ms step_avg:36.99ms +[2025-09-05 19:28:16] [Rank 0] step:9761/10000 train_time:360985ms step_avg:36.98ms +[2025-09-05 19:28:16] [Rank 0] step:9761/10000 train_time:360985ms step_avg:36.98ms +[2025-09-05 19:28:17] [Rank 0] step:9781/10000 train_time:361650ms step_avg:36.97ms +[2025-09-05 19:28:17] [Rank 0] step:9781/10000 train_time:361650ms step_avg:36.97ms +[2025-09-05 19:28:18] [Rank 0] step:9801/10000 train_time:362313ms step_avg:36.97ms +[2025-09-05 19:28:18] [Rank 0] step:9801/10000 train_time:362313ms step_avg:36.97ms +[2025-09-05 19:28:18] [Rank 0] step:9821/10000 train_time:362978ms step_avg:36.96ms +[2025-09-05 19:28:18] [Rank 0] step:9821/10000 train_time:362978ms step_avg:36.96ms +[2025-09-05 19:28:19] [Rank 0] step:9841/10000 train_time:363642ms step_avg:36.95ms +[2025-09-05 19:28:19] [Rank 0] step:9841/10000 train_time:363642ms step_avg:36.95ms +[2025-09-05 19:28:20] [Rank 0] step:9861/10000 train_time:364306ms step_avg:36.94ms +[2025-09-05 19:28:20] [Rank 0] step:9861/10000 train_time:364306ms step_avg:36.94ms +[2025-09-05 19:28:20] [Rank 0] step:9881/10000 train_time:364971ms step_avg:36.94ms +[2025-09-05 19:28:20] [Rank 0] step:9881/10000 train_time:364971ms step_avg:36.94ms +[2025-09-05 19:28:21] [Rank 0] step:9901/10000 train_time:365635ms step_avg:36.93ms +[2025-09-05 19:28:21] [Rank 0] step:9901/10000 train_time:365635ms step_avg:36.93ms +[2025-09-05 19:28:22] [Rank 0] step:9921/10000 train_time:366299ms step_avg:36.92ms +[2025-09-05 19:28:22] [Rank 0] step:9921/10000 train_time:366299ms step_avg:36.92ms +[2025-09-05 19:28:22] [Rank 0] step:9941/10000 train_time:366963ms step_avg:36.91ms +[2025-09-05 19:28:22] [Rank 0] step:9941/10000 train_time:366963ms step_avg:36.91ms +[2025-09-05 19:28:23] [Rank 0] step:9961/10000 train_time:367815ms step_avg:36.93ms +[2025-09-05 19:28:23] [Rank 0] step:9961/10000 train_time:367815ms step_avg:36.93ms +[2025-09-05 19:28:24] [Rank 0] step:9981/10000 train_time:368477ms step_avg:36.92ms +[2025-09-05 19:28:24] [Rank 0] step:9981/10000 train_time:368477ms step_avg:36.92ms +[2025-09-05 19:28:25] [Rank 0] step:10000/10000 train_time:369109ms step_avg:36.91ms +[2025-09-05 19:28:25] [Rank 0] step:10000/10000 train_time:369109ms step_avg:36.91ms +[2025-09-05 19:28:25] [Rank 0] PRINT: Warning: val_tokens (491520) not 
perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:28:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:28:25] [Rank 0] PRINT: step:10000/10000 train_loss:0.6417 val_loss:0.6338 train_time:369384ms step_avg:36.94ms +[2025-09-05 19:28:25] [Rank 0] PRINT: step:10000/10000 train_loss:0.6417 val_loss:0.6338 train_time:369384ms step_avg:36.94ms +[2025-09-05 19:28:25] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:28:25] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:28:25] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:28:25] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:29:46] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:29:46] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:29:46] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:29:46] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:29:46] [Rank 0] Total Loss: 5.1379 +[2025-09-05 19:29:46] [Rank 0] Total Loss: 5.1379 +[2025-09-05 19:29:46] [Rank 0] Total FTA (Unweighted): 0.9381 +[2025-09-05 19:29:46] [Rank 0] Total FTA (Unweighted): 0.9381 +[2025-09-05 19:29:46] [Rank 0] Total FTA (Weighted): 0.9381 +[2025-09-05 19:29:46] [Rank 0] Total FTA (Weighted): 0.9381 +[2025-09-05 19:29:46] [Rank 0] Group 0 Loss: 5.3834 +[2025-09-05 19:29:46] [Rank 0] Group 0 Loss: 5.3834 +[2025-09-05 19:29:46] [Rank 0] Group 1 Loss: 4.8528 +[2025-09-05 19:29:46] [Rank 0] Group 1 Loss: 4.8528 +[2025-09-05 19:29:46] [Rank 0] Group 2 Loss: 4.6575 +[2025-09-05 19:29:46] [Rank 0] Group 2 Loss: 4.6575 +[2025-09-05 19:29:46] [Rank 0] Group 3 Loss: 5.0117 +[2025-09-05 19:29:46] [Rank 0] Group 3 Loss: 5.0117 +[2025-09-05 19:29:46] [Rank 0] Group 4 Loss: 4.9293 +[2025-09-05 19:29:46] [Rank 0] Group 4 Loss: 4.9293 +[2025-09-05 19:29:46] [Rank 0] Group 5 Loss: 5.1302 +[2025-09-05 19:29:46] [Rank 0] Group 5 Loss: 5.1302 +[2025-09-05 19:29:46] [Rank 0] Group 6 Loss: 4.9585 +[2025-09-05 19:29:46] [Rank 0] Group 6 Loss: 4.9585 +[2025-09-05 19:29:46] [Rank 0] Group 7 Loss: 5.0666 +[2025-09-05 19:29:46] [Rank 0] Group 7 Loss: 5.0666 +[2025-09-05 19:29:46] [Rank 0] Group 8 Loss: 5.2463 +[2025-09-05 19:29:46] [Rank 0] Group 8 Loss: 5.2463 +[2025-09-05 19:29:46] [Rank 0] Group 9 Loss: 5.1729 +[2025-09-05 19:29:46] [Rank 0] Group 9 Loss: 5.1729 +[2025-09-05 19:29:46] [Rank 0] Group 10 Loss: 5.3076 +[2025-09-05 19:29:46] [Rank 0] Group 10 Loss: 5.3076 +[2025-09-05 19:29:46] [Rank 0] Group 11 Loss: 5.2903 +[2025-09-05 19:29:46] [Rank 0] Group 11 Loss: 5.2903 +[2025-09-05 19:29:46] [Rank 0] Group 12 Loss: 5.2584 +[2025-09-05 19:29:46] [Rank 0] Group 12 Loss: 5.2584 +[2025-09-05 19:29:46] [Rank 0] Group 13 Loss: 5.3528 +[2025-09-05 19:29:46] [Rank 0] Group 13 Loss: 5.3528 +[2025-09-05 19:29:46] [Rank 0] Group 14 Loss: 5.2795 +[2025-09-05 19:29:46] [Rank 0] Group 14 Loss: 5.2795 +[2025-09-05 19:29:46] [Rank 0] Group 15 Loss: 5.3083 +[2025-09-05 19:29:46] [Rank 0] Group 15 Loss: 5.3083 +[2025-09-05 19:29:46] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:29:46] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:29:46] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:29:46] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:29:47] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:29:47] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:29:47] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 19:29:47] [Rank 
+[2025-09-05 19:28:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:28:25] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:29:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:29:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:29:46] [Rank 0] Total Loss: 5.1379
+[2025-09-05 19:29:46] [Rank 0] Total FTA (Unweighted): 0.9381
+[2025-09-05 19:29:46] [Rank 0] Total FTA (Weighted): 0.9381
+[2025-09-05 19:29:46] [Rank 0] Group 0 Loss: 5.3834
+[2025-09-05 19:29:46] [Rank 0] Group 1 Loss: 4.8528
+[2025-09-05 19:29:46] [Rank 0] Group 2 Loss: 4.6575
+[2025-09-05 19:29:46] [Rank 0] Group 3 Loss: 5.0117
+[2025-09-05 19:29:46] [Rank 0] Group 4 Loss: 4.9293
+[2025-09-05 19:29:46] [Rank 0] Group 5 Loss: 5.1302
+[2025-09-05 19:29:46] [Rank 0] Group 6 Loss: 4.9585
+[2025-09-05 19:29:46] [Rank 0] Group 7 Loss: 5.0666
+[2025-09-05 19:29:46] [Rank 0] Group 8 Loss: 5.2463
+[2025-09-05 19:29:46] [Rank 0] Group 9 Loss: 5.1729
+[2025-09-05 19:29:46] [Rank 0] Group 10 Loss: 5.3076
+[2025-09-05 19:29:46] [Rank 0] Group 11 Loss: 5.2903
+[2025-09-05 19:29:46] [Rank 0] Group 12 Loss: 5.2584
+[2025-09-05 19:29:46] [Rank 0] Group 13 Loss: 5.3528
+[2025-09-05 19:29:46] [Rank 0] Group 14 Loss: 5.2795
+[2025-09-05 19:29:46] [Rank 0] Group 15 Loss: 5.3083
+[2025-09-05 19:29:46] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:29:46] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 19:29:47] [Rank 0] Group 13 FTA: 0.9800
+[2025-09-05 19:29:47] [Rank 0] Group 14 FTA: 0.6600
+[2025-09-05 19:29:47] [Rank 0] Group 15 FTA: 0.3700
+[2025-09-05 19:29:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-09-05 19:29:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-09-05 19:29:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_loss_curve.png
+[2025-09-05 19:29:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.002_seed_43/total_acc_curve.png
+[2025-09-05 19:29:48] [Rank 0] step:10001/10000 train_time:369392ms step_avg:36.94ms
+[2025-09-05 19:29:48] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 19:29:48 2025 ---
+[2025-09-05 19:29:48] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB
diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..26c1036df761d10b846b9f9d5b0552cd545639b6
--- /dev/null
+++ 
b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.005, + "base_dir": "logs_qa_adam_gated/lr_search_long", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "2306edcd-d751-4310-9f13-725126df0f12", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 
121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 
1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 
850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 
756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..1302a6b3c76d089b97a07e00552a59c0411d83a9 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:873b8c7f8bc2822c1e7ba53ade70ca5ddd4f8cbc0209ebeb92e5883ae907b5ab +size 411833 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..7ad3312962793b76eb76b94673f691ea5dda88b0 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa3d5b42dcea8e150985e32da49236840e5e62dd314927870a632ebfded874b4 +size 425321 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..e1b096bbe9136e0c769c02c27591406e28a36600 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b75de1cfdca3b0a188b996cd466b6edf76edb36f0217b00265694dcf4244cb29 +size 99609 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..ccb2032d51f1fcd851fbcb936e1750e5c12778af --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3789e6537bc58aecc0326e79e6c13a763f35b8635813437b92a4c398549ab16c +size 107795 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/training_log_2306edcd-d751-4310-9f13-725126df0f12.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/training_log_2306edcd-d751-4310-9f13-725126df0f12.txt new file mode 100644 index 0000000000000000000000000000000000000000..0fc1d1561092b75f75ffaabd566dfcee6624d4b4 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/training_log_2306edcd-d751-4310-9f13-725126df0f12.txt @@ -0,0 +1,5614 @@ +[2025-09-05 15:20:53] 
[Rank 0] PRINT: --- Script Start: Fri Sep 5 15:20:53 2025 ---
+[2025-09-05 15:20:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.005, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 15:20:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 15:20:53] [Rank 0] PRINT: Using fixed seed: 42
+[2025-09-05 15:20:53] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42
+[2025-09-05 15:20:53] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
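# --- Editor's sketch (not part of the logged script): a minimal writer for the
# .bin shard format that _load_data_shard below expects -- a 256-int32 header
# (magic 20240520, version 1, token count) followed by the token ids as uint16.
import numpy as np

def write_data_shard(path, token_ids):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520          # magic number checked by the loader
    header[1] = 1                 # format version checked by the loader
    header[2] = len(token_ids)    # number of uint16 tokens that follow
    with open(path, "wb") as f:
        f.write(header.tobytes())                                  # 256 * 4 bytes
        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())  # 2 bytes per token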
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle(files) loops over the shards indefinitely, enabling multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: SGD+momentum on all parameters; "
+                         "10: Muon(O Attn, MLP)/Adam(QK Attn, V Attn); "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QK Attn, V Attn, W_1 MLP); "
+                         "14: Muon(O Attn)/Adam(QK Attn, V Attn, MLP); "
+                         "15: Muon(V Attn)/Adam(QK Attn, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once (the earlier revision
+        # wrote console messages twice, which is why this log's lines were duplicated)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
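# --- Editor's note (not part of the logged script): a quick sanity check of the
# power-law class distribution that generate_powerlaw_selection_counts (defined
# just below) constructs. For m = 15: group 0 holds 1 class with 2**15 = 32768
# samples, and each group g >= 1 holds 2**(g-1) classes with 2**(15-g) samples
# apiece, i.e. 16384 samples per group and 32768 classes overall.
counts, groups = generate_powerlaw_selection_counts(15)
assert len(counts) == 2**15        # 1 + sum over g=1..15 of 2**(g-1) = 32768 classes
assert counts[0] == 2**15          # the single group-0 head class sees 32768 samples
assert groups.count(15) == 2**14   # the rarest group holds 16384 classes...
assert counts[2**15 - 1] == 1      # ...each selected exactly once
+# ...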
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
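# --- Editor's summary (derived from the branches of this chain, not part of the
# logged script): optimizer_mode -> which matrices Muon updates vs. Adam --
#   0: Muon(QKVO+MLP)             1: Muon(QK)/Adam(VO,MLP)      2: Muon(VO)/Adam(QK,MLP)
#   3: Muon(attn)/Adam(MLP)       4: Muon(MLP)/Adam(attn)       5: Adam everywhere
#   6: Muon(W_2)/Adam(attn,W_1)   7: Muon(VO,MLP)/Adam(QK)      8: Muon(VO,W_2)/Adam(QK,W_1)
#   9: plain SGD+momentum        10: Muon(W_O,MLP)/Adam(QKV)   13: Muon(W_O,W_2)/Adam(QKV,W_1)
#  14: Muon(W_O)/Adam(rest)      15: Muon(W_V)/Adam(rest)      16: Muon(QKV)/Adam(W_O,MLP)
# Embeddings, the LM head, and scalar parameters always go to Adam.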
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
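# --- Editor's sketch (an assumption; the training loop is further down in this
# log): the loop presumably steps every optimizer in `optimizers`, so mode 5
# steps a single fused Adam while the Muon modes step [Adam, Muon] together:
#
#     loss.backward()
#     for opt in optimizers:
#         opt.step()
#     model.zero_grad(set_to_none=True)
+        print0(f"PRINT: Optimizers configured. 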
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in the attention class used here (GatedSelfAttention)
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr # fix: needed by the Muon setup below; previously only the "qkvo" branch defined it
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
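+    # Gated-MLP grouping (this differs from the "qkvo" branch): W_1 covers both
+    # c_fc and c_up (the two input/gate projections), while W_2 is c_proj alone,
+    # so the only MLP weight Muon sees in modes 6, 8 and 13 is the output projection.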
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
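+# Worked example with the defaults in Hyperparameters (num_iterations=10000,
+# cooldown_frac=0.8): steps 0-1999 (x < 0.2) train at the full initial_lr;
+# afterwards the factor decays linearly, e.g. step 6000 gives x=0.6, w=0.5 and
+# a factor of 0.5*1.0 + 0.5*0.1 = 0.55, reaching 0.1 at the final step.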
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
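+    # Why a fixed eval set: per-group metrics jitter if every eval re-samples,
+    # so up to per_group_k parseable QA items are drawn once per group with a
+    # fixed seed and their line indices cached to fixed_eval_indices.json; every
+    # later eval step (and any rerun) then scores exactly the same samples.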
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
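+        # "FTA" in the detailed eval below is first-token accuracy: the argmax
+        # prediction at the last prompt position must equal the first token of
+        # " <answer>". "Weighted" averages over all samples; "unweighted"
+        # averages the per-group accuracies (see run_detailed_evaluation).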
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 15:20:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
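+# Note: print0 only writes on the master rank; the "PRINT:" prefix acts as a
+# console gate (it is stripped before printing to stdout), while the logfile
+# always receives the full timestamped, rank-tagged message.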
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
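+# Summary of the optimizer_mode split implemented above (hidden matrices only; embeds,
+# head and scalars always go to Adam). Derived from the branches in this file:
+#   mode 0:  Muon on QKVO + MLP        mode 8:  Muon on VO + W_2
+#   mode 1:  Muon on QK                mode 9:  pure SGD + momentum (no Muon/Adam)
+#   mode 2:  Muon on VO                mode 10: Muon on O + MLP
+#   mode 3:  Muon on QKVO              mode 13: Muon on O + W_2
+#   mode 4:  Muon on MLP               mode 14: Muon on O
+#   mode 5:  all matrices on Adam      mode 15: Muon on V
+#   mode 6:  Muon on W_2               mode 16: Muon on QKV
+#   mode 7:  Muon on VO + MLP          (all non-Muon matrices fall to Adam at adam_lr)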
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP; Adam on V, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:  # Muon on W_2, W_O; Adam on V, QK Attn, W_1
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:  # Muon on W_O; Adam on V, QK Attn, MLP
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:  # Muon on W_V; Adam on O Attn, QK Attn, MLP
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:  # Muon on QKV; Adam on O Attn, MLP
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04),
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
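+# Note: in the "gated" parameterization each MLP block carries three matrices
+# (c_fc, c_up, c_proj). The grouping above treats W_1 = {c_fc, c_up} and W_2 = c_proj,
+# so the W_1/W_2 modes (6, 8, 13) and the plain MLP modes cover all three matrices.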
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    # PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter for building the fixed eval set: ensure a parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    # QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    # M_FOR_POWERLAW = 15
+    # NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                # num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
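+# Illustrative sanity checks for the schedules above (num_iterations=10000 matches the run
+# logged below; cooldown_frac=0.8 is an assumed example value):
+#   get_lr returns 1.0 while step/num_iterations < 1 - cooldown_frac, then decays linearly
+#   to 0.1: get_lr(0) == 1.0, get_lr(2000) == 1.0, get_lr(6000) == 0.55, get_lr(10000) == 0.1.
+#   Muon momentum warms up over the first 300 steps: 0.85 at step 0, 0.90 at step 150,
+#   0.95 from step 300 onward.
+#   Validation batching: val_tokens=491520 with val_batch_size=65536 gives
+#   491520 // 65536 = 7 full val steps, leaving 32768 tokens unused per pass (hence the
+#   repeated divisibility warning in the log below).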
+[2025-09-05 15:20:54] [Rank 0] PRINT: Testing model forward function: +[2025-09-05 15:20:54] [Rank 0] PRINT: Testing model forward function: +[2025-09-05 15:20:59] [Rank 0] PRINT: Model test - Result type: +[2025-09-05 15:20:59] [Rank 0] PRINT: Model test - Result type: +[2025-09-05 15:20:59] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-05 15:20:59] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-05 15:20:59] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-05 15:20:59] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-05 15:20:59] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-05 15:20:59] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-05 15:20:59] [Rank 0] PRINT: Model returns: +[2025-09-05 15:20:59] [Rank 0] PRINT: Model returns: +[2025-09-05 15:20:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-05 15:20:59] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-05 15:20:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-09-05 15:20:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-09-05 15:20:59] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-09-05 15:20:59] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-09-05 15:20:59] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-09-05 15:20:59] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-09-05 15:20:59] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-09-05 15:20:59] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-09-05 15:20:59] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-05 15:20:59] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-05 15:21:03] [Rank 0] PRINT: Model compilation complete. +[2025-09-05 15:21:03] [Rank 0] PRINT: Model compilation complete. +[2025-09-05 15:21:03] [Rank 0] PRINT: Starting warmup... +[2025-09-05 15:21:03] [Rank 0] PRINT: Starting warmup... +[2025-09-05 15:21:54] [Rank 0] PRINT: Warmup complete. +[2025-09-05 15:21:54] [Rank 0] PRINT: Warmup complete. +[2025-09-05 15:21:54] [Rank 0] PRINT: Starting training... +[2025-09-05 15:21:54] [Rank 0] PRINT: Starting training... +[2025-09-05 15:22:02] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/fixed_eval_indices.json +[2025-09-05 15:22:02] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/fixed_eval_indices.json +[2025-09-05 15:22:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:22:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:22:06] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-05 15:22:06] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-05 15:22:42] [Rank 0] step:21/10000 train_time:34901ms step_avg:1661.96ms +[2025-09-05 15:22:42] [Rank 0] step:21/10000 train_time:34901ms step_avg:1661.96ms +[2025-09-05 15:22:42] [Rank 0] step:41/10000 train_time:35551ms step_avg:867.09ms +[2025-09-05 15:22:42] [Rank 0] step:41/10000 train_time:35551ms step_avg:867.09ms +[2025-09-05 15:22:43] [Rank 0] step:61/10000 train_time:36198ms step_avg:593.42ms +[2025-09-05 15:22:43] [Rank 0] step:61/10000 train_time:36198ms step_avg:593.42ms +[2025-09-05 15:22:44] [Rank 0] step:81/10000 train_time:36846ms step_avg:454.89ms +[2025-09-05 15:22:44] [Rank 0] step:81/10000 train_time:36846ms step_avg:454.89ms +[2025-09-05 15:22:44] [Rank 0] step:101/10000 train_time:37599ms step_avg:372.27ms +[2025-09-05 15:22:44] [Rank 0] step:101/10000 train_time:37599ms step_avg:372.27ms +[2025-09-05 15:22:45] [Rank 0] step:121/10000 train_time:38255ms step_avg:316.16ms +[2025-09-05 15:22:45] [Rank 0] step:121/10000 train_time:38255ms step_avg:316.16ms +[2025-09-05 15:22:46] [Rank 0] step:141/10000 train_time:38903ms step_avg:275.90ms +[2025-09-05 15:22:46] [Rank 0] step:141/10000 train_time:38903ms step_avg:275.90ms +[2025-09-05 15:22:46] [Rank 0] step:161/10000 train_time:39550ms step_avg:245.65ms +[2025-09-05 15:22:46] [Rank 0] step:161/10000 train_time:39550ms step_avg:245.65ms +[2025-09-05 15:22:47] [Rank 0] step:181/10000 train_time:40198ms step_avg:222.09ms +[2025-09-05 15:22:47] [Rank 0] step:181/10000 train_time:40198ms step_avg:222.09ms +[2025-09-05 15:22:48] [Rank 0] step:201/10000 train_time:41014ms step_avg:204.05ms +[2025-09-05 15:22:48] [Rank 0] step:201/10000 train_time:41014ms step_avg:204.05ms +[2025-09-05 15:22:48] [Rank 0] step:221/10000 train_time:41663ms step_avg:188.52ms +[2025-09-05 15:22:48] [Rank 0] step:221/10000 train_time:41663ms step_avg:188.52ms +[2025-09-05 15:22:49] [Rank 0] step:241/10000 train_time:42311ms step_avg:175.57ms +[2025-09-05 15:22:49] [Rank 0] step:241/10000 train_time:42311ms step_avg:175.57ms +[2025-09-05 15:22:50] [Rank 0] step:261/10000 train_time:42960ms step_avg:164.60ms +[2025-09-05 15:22:50] [Rank 0] step:261/10000 train_time:42960ms step_avg:164.60ms +[2025-09-05 15:22:51] [Rank 0] step:281/10000 train_time:43766ms step_avg:155.75ms +[2025-09-05 15:22:51] [Rank 0] step:281/10000 train_time:43766ms step_avg:155.75ms +[2025-09-05 15:22:51] [Rank 0] step:301/10000 train_time:44415ms step_avg:147.56ms +[2025-09-05 15:22:51] [Rank 0] step:301/10000 train_time:44415ms step_avg:147.56ms +[2025-09-05 15:22:52] [Rank 0] step:321/10000 train_time:45064ms step_avg:140.39ms +[2025-09-05 15:22:52] [Rank 0] step:321/10000 train_time:45064ms step_avg:140.39ms +[2025-09-05 15:22:52] [Rank 0] step:341/10000 train_time:45712ms step_avg:134.05ms +[2025-09-05 15:22:52] [Rank 0] step:341/10000 train_time:45712ms step_avg:134.05ms +[2025-09-05 15:22:53] [Rank 0] step:361/10000 train_time:46364ms step_avg:128.43ms +[2025-09-05 15:22:53] [Rank 0] step:361/10000 train_time:46364ms step_avg:128.43ms +[2025-09-05 15:22:54] [Rank 0] step:381/10000 train_time:47010ms step_avg:123.39ms +[2025-09-05 15:22:54] [Rank 0] step:381/10000 train_time:47010ms step_avg:123.39ms +[2025-09-05 15:22:54] [Rank 0] step:401/10000 train_time:47659ms step_avg:118.85ms +[2025-09-05 15:22:54] [Rank 0] step:401/10000 train_time:47659ms step_avg:118.85ms +[2025-09-05 15:22:55] [Rank 0] 
step:421/10000 train_time:48309ms step_avg:114.75ms +[2025-09-05 15:22:55] [Rank 0] step:421/10000 train_time:48309ms step_avg:114.75ms +[2025-09-05 15:22:56] [Rank 0] step:441/10000 train_time:48958ms step_avg:111.02ms +[2025-09-05 15:22:56] [Rank 0] step:441/10000 train_time:48958ms step_avg:111.02ms +[2025-09-05 15:22:56] [Rank 0] step:461/10000 train_time:49606ms step_avg:107.61ms +[2025-09-05 15:22:56] [Rank 0] step:461/10000 train_time:49606ms step_avg:107.61ms +[2025-09-05 15:22:57] [Rank 0] step:481/10000 train_time:50255ms step_avg:104.48ms +[2025-09-05 15:22:57] [Rank 0] step:481/10000 train_time:50255ms step_avg:104.48ms +[2025-09-05 15:22:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:22:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:22:58] [Rank 0] PRINT: step:500/10000 train_loss:3.1768 val_loss:1.2970 train_time:51135ms step_avg:102.27ms +[2025-09-05 15:22:58] [Rank 0] PRINT: step:500/10000 train_loss:3.1768 val_loss:1.2970 train_time:51135ms step_avg:102.27ms +[2025-09-05 15:22:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:22:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:22:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:22:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:24:21] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:24:21] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:24:21] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:24:21] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:24:21] [Rank 0] Total Loss: 4.4007 +[2025-09-05 15:24:21] [Rank 0] Total Loss: 4.4007 +[2025-09-05 15:24:21] [Rank 0] Total FTA (Unweighted): 0.3481 +[2025-09-05 15:24:21] [Rank 0] Total FTA (Unweighted): 0.3481 +[2025-09-05 15:24:21] [Rank 0] Total FTA (Weighted): 0.3481 +[2025-09-05 15:24:21] [Rank 0] Total FTA (Weighted): 0.3481 +[2025-09-05 15:24:21] [Rank 0] Group 0 Loss: 3.9347 +[2025-09-05 15:24:21] [Rank 0] Group 0 Loss: 3.9347 +[2025-09-05 15:24:21] [Rank 0] Group 1 Loss: 3.5706 +[2025-09-05 15:24:21] [Rank 0] Group 1 Loss: 3.5706 +[2025-09-05 15:24:21] [Rank 0] Group 2 Loss: 3.5008 +[2025-09-05 15:24:21] [Rank 0] Group 2 Loss: 3.5008 +[2025-09-05 15:24:21] [Rank 0] Group 3 Loss: 3.8632 +[2025-09-05 15:24:21] [Rank 0] Group 3 Loss: 3.8632 +[2025-09-05 15:24:21] [Rank 0] Group 4 Loss: 3.9161 +[2025-09-05 15:24:21] [Rank 0] Group 4 Loss: 3.9161 +[2025-09-05 15:24:21] [Rank 0] Group 5 Loss: 4.0376 +[2025-09-05 15:24:21] [Rank 0] Group 5 Loss: 4.0376 +[2025-09-05 15:24:21] [Rank 0] Group 6 Loss: 4.0785 +[2025-09-05 15:24:21] [Rank 0] Group 6 Loss: 4.0785 +[2025-09-05 15:24:21] [Rank 0] Group 7 Loss: 4.3164 +[2025-09-05 15:24:21] [Rank 0] Group 7 Loss: 4.3164 +[2025-09-05 15:24:21] [Rank 0] Group 8 Loss: 4.5890 +[2025-09-05 15:24:21] [Rank 0] Group 8 Loss: 4.5890 +[2025-09-05 15:24:21] [Rank 0] Group 9 Loss: 4.7274 +[2025-09-05 15:24:21] [Rank 0] Group 9 Loss: 4.7274 +[2025-09-05 15:24:21] [Rank 0] Group 10 Loss: 4.8980 +[2025-09-05 15:24:21] [Rank 0] Group 10 Loss: 4.8980 +[2025-09-05 15:24:21] [Rank 0] Group 11 Loss: 4.9775 +[2025-09-05 15:24:21] [Rank 0] Group 11 Loss: 4.9775 +[2025-09-05 15:24:21] [Rank 0] Group 12 Loss: 4.9795 +[2025-09-05 15:24:21] [Rank 0] Group 12 Loss: 4.9795 
+[2025-09-05 15:24:21] [Rank 0] Group 13 Loss: 5.0408 +[2025-09-05 15:24:21] [Rank 0] Group 13 Loss: 5.0408 +[2025-09-05 15:24:21] [Rank 0] Group 14 Loss: 4.9983 +[2025-09-05 15:24:21] [Rank 0] Group 14 Loss: 4.9983 +[2025-09-05 15:24:21] [Rank 0] Group 15 Loss: 4.9827 +[2025-09-05 15:24:21] [Rank 0] Group 15 Loss: 4.9827 +[2025-09-05 15:24:21] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:24:21] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:24:21] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:24:21] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:24:21] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:24:21] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:24:21] [Rank 0] Group 3 FTA: 0.8700 +[2025-09-05 15:24:21] [Rank 0] Group 3 FTA: 0.8700 +[2025-09-05 15:24:21] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 15:24:21] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 15:24:21] [Rank 0] Group 5 FTA: 0.2900 +[2025-09-05 15:24:21] [Rank 0] Group 5 FTA: 0.2900 +[2025-09-05 15:24:21] [Rank 0] Group 6 FTA: 0.1200 +[2025-09-05 15:24:21] [Rank 0] Group 6 FTA: 0.1200 +[2025-09-05 15:24:21] [Rank 0] Group 7 FTA: 0.0500 +[2025-09-05 15:24:21] [Rank 0] Group 7 FTA: 0.0500 +[2025-09-05 15:24:21] [Rank 0] Group 8 FTA: 0.0700 +[2025-09-05 15:24:21] [Rank 0] Group 8 FTA: 0.0700 +[2025-09-05 15:24:22] [Rank 0] Group 9 FTA: 0.0700 +[2025-09-05 15:24:22] [Rank 0] Group 9 FTA: 0.0700 +[2025-09-05 15:24:22] [Rank 0] Group 10 FTA: 0.0600 +[2025-09-05 15:24:22] [Rank 0] Group 10 FTA: 0.0600 +[2025-09-05 15:24:22] [Rank 0] Group 11 FTA: 0.0800 +[2025-09-05 15:24:22] [Rank 0] Group 11 FTA: 0.0800 +[2025-09-05 15:24:22] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 15:24:22] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 15:24:22] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 15:24:22] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 15:24:22] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 15:24:22] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 15:24:22] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 15:24:22] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 15:24:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:24:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:24:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:24:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:24:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:24:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:24:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:24:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:24:23] [Rank 0] step:501/10000 train_time:51145ms step_avg:102.09ms +[2025-09-05 15:24:23] [Rank 0] step:501/10000 train_time:51145ms 
step_avg:102.09ms +[2025-09-05 15:24:24] [Rank 0] step:521/10000 train_time:51577ms step_avg:99.00ms +[2025-09-05 15:24:24] [Rank 0] step:521/10000 train_time:51577ms step_avg:99.00ms +[2025-09-05 15:24:24] [Rank 0] step:541/10000 train_time:52224ms step_avg:96.53ms +[2025-09-05 15:24:24] [Rank 0] step:541/10000 train_time:52224ms step_avg:96.53ms +[2025-09-05 15:24:25] [Rank 0] step:561/10000 train_time:52872ms step_avg:94.25ms +[2025-09-05 15:24:25] [Rank 0] step:561/10000 train_time:52872ms step_avg:94.25ms +[2025-09-05 15:24:26] [Rank 0] step:581/10000 train_time:53519ms step_avg:92.12ms +[2025-09-05 15:24:26] [Rank 0] step:581/10000 train_time:53519ms step_avg:92.12ms +[2025-09-05 15:24:26] [Rank 0] step:601/10000 train_time:54166ms step_avg:90.13ms +[2025-09-05 15:24:26] [Rank 0] step:601/10000 train_time:54166ms step_avg:90.13ms +[2025-09-05 15:24:27] [Rank 0] step:621/10000 train_time:54816ms step_avg:88.27ms +[2025-09-05 15:24:27] [Rank 0] step:621/10000 train_time:54816ms step_avg:88.27ms +[2025-09-05 15:24:27] [Rank 0] step:641/10000 train_time:55461ms step_avg:86.52ms +[2025-09-05 15:24:27] [Rank 0] step:641/10000 train_time:55461ms step_avg:86.52ms +[2025-09-05 15:24:28] [Rank 0] step:661/10000 train_time:56109ms step_avg:84.88ms +[2025-09-05 15:24:28] [Rank 0] step:661/10000 train_time:56109ms step_avg:84.88ms +[2025-09-05 15:24:29] [Rank 0] step:681/10000 train_time:56756ms step_avg:83.34ms +[2025-09-05 15:24:29] [Rank 0] step:681/10000 train_time:56756ms step_avg:83.34ms +[2025-09-05 15:24:29] [Rank 0] step:701/10000 train_time:57403ms step_avg:81.89ms +[2025-09-05 15:24:29] [Rank 0] step:701/10000 train_time:57403ms step_avg:81.89ms +[2025-09-05 15:24:30] [Rank 0] step:721/10000 train_time:58051ms step_avg:80.51ms +[2025-09-05 15:24:30] [Rank 0] step:721/10000 train_time:58051ms step_avg:80.51ms +[2025-09-05 15:24:31] [Rank 0] step:741/10000 train_time:58699ms step_avg:79.22ms +[2025-09-05 15:24:31] [Rank 0] step:741/10000 train_time:58699ms step_avg:79.22ms +[2025-09-05 15:24:31] [Rank 0] step:761/10000 train_time:59349ms step_avg:77.99ms +[2025-09-05 15:24:31] [Rank 0] step:761/10000 train_time:59349ms step_avg:77.99ms +[2025-09-05 15:24:32] [Rank 0] step:781/10000 train_time:60007ms step_avg:76.83ms +[2025-09-05 15:24:32] [Rank 0] step:781/10000 train_time:60007ms step_avg:76.83ms +[2025-09-05 15:24:33] [Rank 0] step:801/10000 train_time:60659ms step_avg:75.73ms +[2025-09-05 15:24:33] [Rank 0] step:801/10000 train_time:60659ms step_avg:75.73ms +[2025-09-05 15:24:34] [Rank 0] step:821/10000 train_time:61310ms step_avg:74.68ms +[2025-09-05 15:24:34] [Rank 0] step:821/10000 train_time:61310ms step_avg:74.68ms +[2025-09-05 15:24:34] [Rank 0] step:841/10000 train_time:62446ms step_avg:74.25ms +[2025-09-05 15:24:34] [Rank 0] step:841/10000 train_time:62446ms step_avg:74.25ms +[2025-09-05 15:24:35] [Rank 0] step:861/10000 train_time:63099ms step_avg:73.29ms +[2025-09-05 15:24:35] [Rank 0] step:861/10000 train_time:63099ms step_avg:73.29ms +[2025-09-05 15:24:36] [Rank 0] step:881/10000 train_time:63752ms step_avg:72.36ms +[2025-09-05 15:24:36] [Rank 0] step:881/10000 train_time:63752ms step_avg:72.36ms +[2025-09-05 15:24:36] [Rank 0] step:901/10000 train_time:64404ms step_avg:71.48ms +[2025-09-05 15:24:36] [Rank 0] step:901/10000 train_time:64404ms step_avg:71.48ms +[2025-09-05 15:24:37] [Rank 0] step:921/10000 train_time:65056ms step_avg:70.64ms +[2025-09-05 15:24:37] [Rank 0] step:921/10000 train_time:65056ms step_avg:70.64ms +[2025-09-05 15:24:38] [Rank 0] step:941/10000 
train_time:65709ms step_avg:69.83ms +[2025-09-05 15:24:38] [Rank 0] step:941/10000 train_time:65709ms step_avg:69.83ms +[2025-09-05 15:24:38] [Rank 0] step:961/10000 train_time:66361ms step_avg:69.05ms +[2025-09-05 15:24:38] [Rank 0] step:961/10000 train_time:66361ms step_avg:69.05ms +[2025-09-05 15:24:39] [Rank 0] step:981/10000 train_time:67012ms step_avg:68.31ms +[2025-09-05 15:24:39] [Rank 0] step:981/10000 train_time:67012ms step_avg:68.31ms +[2025-09-05 15:24:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:24:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:24:40] [Rank 0] PRINT: step:1000/10000 train_loss:1.0755 val_loss:0.9429 train_time:67896ms step_avg:67.90ms +[2025-09-05 15:24:40] [Rank 0] PRINT: step:1000/10000 train_loss:1.0755 val_loss:0.9429 train_time:67896ms step_avg:67.90ms +[2025-09-05 15:24:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:24:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:24:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:24:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:26:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:26:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:26:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:26:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:26:02] [Rank 0] Total Loss: 4.6857 +[2025-09-05 15:26:02] [Rank 0] Total Loss: 4.6857 +[2025-09-05 15:26:02] [Rank 0] Total FTA (Unweighted): 0.5687 +[2025-09-05 15:26:02] [Rank 0] Total FTA (Unweighted): 0.5687 +[2025-09-05 15:26:02] [Rank 0] Total FTA (Weighted): 0.5687 +[2025-09-05 15:26:02] [Rank 0] Total FTA (Weighted): 0.5687 +[2025-09-05 15:26:02] [Rank 0] Group 0 Loss: 4.5348 +[2025-09-05 15:26:02] [Rank 0] Group 0 Loss: 4.5348 +[2025-09-05 15:26:02] [Rank 0] Group 1 Loss: 4.1178 +[2025-09-05 15:26:02] [Rank 0] Group 1 Loss: 4.1178 +[2025-09-05 15:26:02] [Rank 0] Group 2 Loss: 3.9752 +[2025-09-05 15:26:02] [Rank 0] Group 2 Loss: 3.9752 +[2025-09-05 15:26:02] [Rank 0] Group 3 Loss: 4.3680 +[2025-09-05 15:26:02] [Rank 0] Group 3 Loss: 4.3680 +[2025-09-05 15:26:02] [Rank 0] Group 4 Loss: 4.3640 +[2025-09-05 15:26:02] [Rank 0] Group 4 Loss: 4.3640 +[2025-09-05 15:26:02] [Rank 0] Group 5 Loss: 4.4848 +[2025-09-05 15:26:02] [Rank 0] Group 5 Loss: 4.4848 +[2025-09-05 15:26:02] [Rank 0] Group 6 Loss: 4.2922 +[2025-09-05 15:26:02] [Rank 0] Group 6 Loss: 4.2922 +[2025-09-05 15:26:02] [Rank 0] Group 7 Loss: 4.4100 +[2025-09-05 15:26:02] [Rank 0] Group 7 Loss: 4.4100 +[2025-09-05 15:26:02] [Rank 0] Group 8 Loss: 4.6444 +[2025-09-05 15:26:02] [Rank 0] Group 8 Loss: 4.6444 +[2025-09-05 15:26:02] [Rank 0] Group 9 Loss: 4.6772 +[2025-09-05 15:26:02] [Rank 0] Group 9 Loss: 4.6772 +[2025-09-05 15:26:02] [Rank 0] Group 10 Loss: 4.8856 +[2025-09-05 15:26:02] [Rank 0] Group 10 Loss: 4.8856 +[2025-09-05 15:26:02] [Rank 0] Group 11 Loss: 5.0347 +[2025-09-05 15:26:02] [Rank 0] Group 11 Loss: 5.0347 +[2025-09-05 15:26:02] [Rank 0] Group 12 Loss: 5.2149 +[2025-09-05 15:26:02] [Rank 0] Group 12 Loss: 5.2149 +[2025-09-05 15:26:02] [Rank 0] Group 13 Loss: 5.3311 +[2025-09-05 15:26:02] [Rank 0] Group 13 Loss: 5.3311 +[2025-09-05 15:26:02] [Rank 0] Group 14 Loss: 5.3046 +[2025-09-05 15:26:02] [Rank 0] Group 
14 Loss: 5.3046 +[2025-09-05 15:26:02] [Rank 0] Group 15 Loss: 5.3316 +[2025-09-05 15:26:02] [Rank 0] Group 15 Loss: 5.3316 +[2025-09-05 15:26:02] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:26:02] [Rank 0] Group 6 FTA: 0.9400 +[2025-09-05 15:26:02] [Rank 0] Group 6 FTA: 0.9400 +[2025-09-05 15:26:02] [Rank 0] Group 7 FTA: 0.8100 +[2025-09-05 15:26:02] [Rank 0] Group 7 FTA: 0.8100 +[2025-09-05 15:26:02] [Rank 0] Group 8 FTA: 0.5200 +[2025-09-05 15:26:02] [Rank 0] Group 8 FTA: 0.5200 +[2025-09-05 15:26:02] [Rank 0] Group 9 FTA: 0.2200 +[2025-09-05 15:26:02] [Rank 0] Group 9 FTA: 0.2200 +[2025-09-05 15:26:02] [Rank 0] Group 10 FTA: 0.0700 +[2025-09-05 15:26:02] [Rank 0] Group 10 FTA: 0.0700 +[2025-09-05 15:26:02] [Rank 0] Group 11 FTA: 0.0700 +[2025-09-05 15:26:02] [Rank 0] Group 11 FTA: 0.0700 +[2025-09-05 15:26:02] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 15:26:02] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 15:26:02] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 15:26:02] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 15:26:02] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 15:26:02] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 15:26:02] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:26:02] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:26:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:26:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:26:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:26:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:26:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:26:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:26:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:26:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:26:04] [Rank 0] step:1001/10000 train_time:67905ms step_avg:67.84ms +[2025-09-05 15:26:04] [Rank 0] step:1001/10000 train_time:67905ms step_avg:67.84ms +[2025-09-05 15:26:05] [Rank 0] step:1021/10000 train_time:68339ms step_avg:66.93ms +[2025-09-05 15:26:05] [Rank 0] step:1021/10000 train_time:68339ms step_avg:66.93ms +[2025-09-05 15:26:05] 
[Rank 0] step:1041/10000 train_time:68992ms step_avg:66.27ms +[2025-09-05 15:26:05] [Rank 0] step:1041/10000 train_time:68992ms step_avg:66.27ms +[2025-09-05 15:26:06] [Rank 0] step:1061/10000 train_time:69646ms step_avg:65.64ms +[2025-09-05 15:26:06] [Rank 0] step:1061/10000 train_time:69646ms step_avg:65.64ms +[2025-09-05 15:26:07] [Rank 0] step:1081/10000 train_time:70299ms step_avg:65.03ms +[2025-09-05 15:26:07] [Rank 0] step:1081/10000 train_time:70299ms step_avg:65.03ms +[2025-09-05 15:26:07] [Rank 0] step:1101/10000 train_time:70953ms step_avg:64.44ms +[2025-09-05 15:26:07] [Rank 0] step:1101/10000 train_time:70953ms step_avg:64.44ms +[2025-09-05 15:26:08] [Rank 0] step:1121/10000 train_time:71606ms step_avg:63.88ms +[2025-09-05 15:26:08] [Rank 0] step:1121/10000 train_time:71606ms step_avg:63.88ms +[2025-09-05 15:26:09] [Rank 0] step:1141/10000 train_time:72259ms step_avg:63.33ms +[2025-09-05 15:26:09] [Rank 0] step:1141/10000 train_time:72259ms step_avg:63.33ms +[2025-09-05 15:26:09] [Rank 0] step:1161/10000 train_time:72914ms step_avg:62.80ms +[2025-09-05 15:26:09] [Rank 0] step:1161/10000 train_time:72914ms step_avg:62.80ms +[2025-09-05 15:26:10] [Rank 0] step:1181/10000 train_time:73567ms step_avg:62.29ms +[2025-09-05 15:26:10] [Rank 0] step:1181/10000 train_time:73567ms step_avg:62.29ms +[2025-09-05 15:26:11] [Rank 0] step:1201/10000 train_time:74220ms step_avg:61.80ms +[2025-09-05 15:26:11] [Rank 0] step:1201/10000 train_time:74220ms step_avg:61.80ms +[2025-09-05 15:26:11] [Rank 0] step:1221/10000 train_time:74873ms step_avg:61.32ms +[2025-09-05 15:26:11] [Rank 0] step:1221/10000 train_time:74873ms step_avg:61.32ms +[2025-09-05 15:26:12] [Rank 0] step:1241/10000 train_time:75526ms step_avg:60.86ms +[2025-09-05 15:26:12] [Rank 0] step:1241/10000 train_time:75526ms step_avg:60.86ms +[2025-09-05 15:26:13] [Rank 0] step:1261/10000 train_time:76179ms step_avg:60.41ms +[2025-09-05 15:26:13] [Rank 0] step:1261/10000 train_time:76179ms step_avg:60.41ms +[2025-09-05 15:26:13] [Rank 0] step:1281/10000 train_time:76833ms step_avg:59.98ms +[2025-09-05 15:26:13] [Rank 0] step:1281/10000 train_time:76833ms step_avg:59.98ms +[2025-09-05 15:26:14] [Rank 0] step:1301/10000 train_time:77485ms step_avg:59.56ms +[2025-09-05 15:26:14] [Rank 0] step:1301/10000 train_time:77485ms step_avg:59.56ms +[2025-09-05 15:26:15] [Rank 0] step:1321/10000 train_time:78139ms step_avg:59.15ms +[2025-09-05 15:26:15] [Rank 0] step:1321/10000 train_time:78139ms step_avg:59.15ms +[2025-09-05 15:26:15] [Rank 0] step:1341/10000 train_time:78792ms step_avg:58.76ms +[2025-09-05 15:26:15] [Rank 0] step:1341/10000 train_time:78792ms step_avg:58.76ms +[2025-09-05 15:26:16] [Rank 0] step:1361/10000 train_time:79445ms step_avg:58.37ms +[2025-09-05 15:26:16] [Rank 0] step:1361/10000 train_time:79445ms step_avg:58.37ms +[2025-09-05 15:26:17] [Rank 0] step:1381/10000 train_time:80099ms step_avg:58.00ms +[2025-09-05 15:26:17] [Rank 0] step:1381/10000 train_time:80099ms step_avg:58.00ms +[2025-09-05 15:26:17] [Rank 0] step:1401/10000 train_time:80753ms step_avg:57.64ms +[2025-09-05 15:26:17] [Rank 0] step:1401/10000 train_time:80753ms step_avg:57.64ms +[2025-09-05 15:26:18] [Rank 0] step:1421/10000 train_time:81405ms step_avg:57.29ms +[2025-09-05 15:26:18] [Rank 0] step:1421/10000 train_time:81405ms step_avg:57.29ms +[2025-09-05 15:26:19] [Rank 0] step:1441/10000 train_time:82059ms step_avg:56.95ms +[2025-09-05 15:26:19] [Rank 0] step:1441/10000 train_time:82059ms step_avg:56.95ms +[2025-09-05 15:26:19] [Rank 0] step:1461/10000 
train_time:82711ms step_avg:56.61ms +[2025-09-05 15:26:19] [Rank 0] step:1461/10000 train_time:82711ms step_avg:56.61ms +[2025-09-05 15:26:20] [Rank 0] step:1481/10000 train_time:83365ms step_avg:56.29ms +[2025-09-05 15:26:20] [Rank 0] step:1481/10000 train_time:83365ms step_avg:56.29ms +[2025-09-05 15:26:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:26:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:26:21] [Rank 0] PRINT: step:1500/10000 train_loss:0.9039 val_loss:0.8546 train_time:84251ms step_avg:56.17ms +[2025-09-05 15:26:21] [Rank 0] PRINT: step:1500/10000 train_loss:0.9039 val_loss:0.8546 train_time:84251ms step_avg:56.17ms +[2025-09-05 15:26:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:26:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:26:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:26:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:27:42] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:27:42] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:27:42] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:27:42] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:27:42] [Rank 0] Total Loss: 4.8617 +[2025-09-05 15:27:42] [Rank 0] Total Loss: 4.8617 +[2025-09-05 15:27:42] [Rank 0] Total FTA (Unweighted): 0.6312 +[2025-09-05 15:27:42] [Rank 0] Total FTA (Unweighted): 0.6312 +[2025-09-05 15:27:42] [Rank 0] Total FTA (Weighted): 0.6312 +[2025-09-05 15:27:42] [Rank 0] Total FTA (Weighted): 0.6312 +[2025-09-05 15:27:42] [Rank 0] Group 0 Loss: 4.6958 +[2025-09-05 15:27:42] [Rank 0] Group 0 Loss: 4.6958 +[2025-09-05 15:27:42] [Rank 0] Group 1 Loss: 4.4662 +[2025-09-05 15:27:42] [Rank 0] Group 1 Loss: 4.4662 +[2025-09-05 15:27:42] [Rank 0] Group 2 Loss: 4.2662 +[2025-09-05 15:27:42] [Rank 0] Group 2 Loss: 4.2662 +[2025-09-05 15:27:42] [Rank 0] Group 3 Loss: 4.6699 +[2025-09-05 15:27:42] [Rank 0] Group 3 Loss: 4.6699 +[2025-09-05 15:27:42] [Rank 0] Group 4 Loss: 4.6146 +[2025-09-05 15:27:42] [Rank 0] Group 4 Loss: 4.6146 +[2025-09-05 15:27:42] [Rank 0] Group 5 Loss: 4.6887 +[2025-09-05 15:27:42] [Rank 0] Group 5 Loss: 4.6887 +[2025-09-05 15:27:42] [Rank 0] Group 6 Loss: 4.5371 +[2025-09-05 15:27:42] [Rank 0] Group 6 Loss: 4.5371 +[2025-09-05 15:27:42] [Rank 0] Group 7 Loss: 4.6360 +[2025-09-05 15:27:42] [Rank 0] Group 7 Loss: 4.6360 +[2025-09-05 15:27:42] [Rank 0] Group 8 Loss: 4.7560 +[2025-09-05 15:27:42] [Rank 0] Group 8 Loss: 4.7560 +[2025-09-05 15:27:42] [Rank 0] Group 9 Loss: 4.7444 +[2025-09-05 15:27:42] [Rank 0] Group 9 Loss: 4.7444 +[2025-09-05 15:27:42] [Rank 0] Group 10 Loss: 4.9938 +[2025-09-05 15:27:42] [Rank 0] Group 10 Loss: 4.9938 +[2025-09-05 15:27:42] [Rank 0] Group 11 Loss: 5.0749 +[2025-09-05 15:27:42] [Rank 0] Group 11 Loss: 5.0749 +[2025-09-05 15:27:42] [Rank 0] Group 12 Loss: 5.1933 +[2025-09-05 15:27:42] [Rank 0] Group 12 Loss: 5.1933 +[2025-09-05 15:27:42] [Rank 0] Group 13 Loss: 5.4037 +[2025-09-05 15:27:42] [Rank 0] Group 13 Loss: 5.4037 +[2025-09-05 15:27:42] [Rank 0] Group 14 Loss: 5.4753 +[2025-09-05 15:27:42] [Rank 0] Group 14 Loss: 5.4753 +[2025-09-05 15:27:42] [Rank 0] Group 15 Loss: 5.5719 +[2025-09-05 15:27:42] [Rank 0] Group 15 Loss: 5.5719 +[2025-09-05 15:27:42] [Rank 0] Group 
+[2025-09-05 15:27:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:27:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:27:42] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:27:42] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:27:42] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:27:42] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:27:42] [Rank 0] Group 6 FTA: 0.9800
+[2025-09-05 15:27:42] [Rank 0] Group 7 FTA: 0.9500
+[2025-09-05 15:27:42] [Rank 0] Group 8 FTA: 0.9200
+[2025-09-05 15:27:42] [Rank 0] Group 9 FTA: 0.6000
+[2025-09-05 15:27:42] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 15:27:42] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 15:27:42] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 15:27:42] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 15:27:42] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 15:27:42] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:27:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:27:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:27:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:27:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:27:43] [Rank 0] step:1501/10000 train_time:84260ms step_avg:56.14ms
+[2025-09-05 15:27:44] [Rank 0] step:1521/10000 train_time:84693ms step_avg:55.68ms
+[2025-09-05 15:27:45] [Rank 0] step:1541/10000 train_time:85346ms step_avg:55.38ms
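A note on the totals in the step-1500 evaluation block above: the "Total FTA (Unweighted)" is the plain mean of the 16 per-group FTA values, and since the 1600 fixed-eval samples split evenly into 16 groups of 100, the sample-weighted mean coincides with it. A minimal check in Python, illustrative only and not part of the training scripts:

```python
# Per-group FTA values from the step-1500 evaluation block above.
group_fta = [1.00, 1.00, 1.00, 1.00, 1.00, 1.00,
             0.98, 0.95, 0.92, 0.60, 0.20,
             0.09, 0.09, 0.09, 0.10, 0.08]

unweighted = sum(group_fta) / len(group_fta)
print(f"{unweighted:.4f}")  # ~0.6312, matching the logged Total FTA (Unweighted)

# With equal group sizes (1600 / 16 = 100 samples each), weighting by sample
# count changes nothing -- hence identical "Unweighted" and "Weighted" totals.
```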
+[2025-09-05 15:27:45] [Rank 0] step:1561/10000 train_time:85999ms step_avg:55.09ms
+[2025-09-05 15:27:46] [Rank 0] step:1581/10000 train_time:86652ms step_avg:54.81ms
+[2025-09-05 15:27:47] [Rank 0] step:1601/10000 train_time:87304ms step_avg:54.53ms
+[2025-09-05 15:27:47] [Rank 0] step:1621/10000 train_time:87957ms step_avg:54.26ms
+[2025-09-05 15:27:48] [Rank 0] step:1641/10000 train_time:88792ms step_avg:54.11ms
+[2025-09-05 15:27:49] [Rank 0] step:1661/10000 train_time:89446ms step_avg:53.85ms
+[2025-09-05 15:27:49] [Rank 0] step:1681/10000 train_time:90099ms step_avg:53.60ms
+[2025-09-05 15:27:50] [Rank 0] step:1701/10000 train_time:90752ms step_avg:53.35ms
+[2025-09-05 15:27:51] [Rank 0] step:1721/10000 train_time:91405ms step_avg:53.11ms
+[2025-09-05 15:27:51] [Rank 0] step:1741/10000 train_time:92057ms step_avg:52.88ms
+[2025-09-05 15:27:52] [Rank 0] step:1761/10000 train_time:92710ms step_avg:52.65ms
+[2025-09-05 15:27:53] [Rank 0] step:1781/10000 train_time:93363ms step_avg:52.42ms
+[2025-09-05 15:27:53] [Rank 0] step:1801/10000 train_time:94015ms step_avg:52.20ms
+[2025-09-05 15:27:54] [Rank 0] step:1821/10000 train_time:94668ms step_avg:51.99ms
+[2025-09-05 15:27:55] [Rank 0] step:1841/10000 train_time:95321ms step_avg:51.78ms
+[2025-09-05 15:27:55] [Rank 0] step:1861/10000 train_time:95974ms step_avg:51.57ms
+[2025-09-05 15:27:56] [Rank 0] step:1881/10000 train_time:96627ms step_avg:51.37ms
+[2025-09-05 15:27:57] [Rank 0] step:1901/10000 train_time:97280ms step_avg:51.17ms
+[2025-09-05 15:27:57] [Rank 0] step:1921/10000 train_time:97933ms step_avg:50.98ms
+[2025-09-05 15:27:58] [Rank 0] step:1941/10000 train_time:98585ms step_avg:50.79ms
+[2025-09-05 15:27:59] [Rank 0] step:1961/10000 train_time:99238ms step_avg:50.61ms
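The step_avg field in these step lines is simply cumulative train_time divided by the step index, which is why it drifts downward as the fixed startup cost is amortized. A one-line check, illustrative only, using the step:1961 values above:

```python
# step_avg = cumulative train_time / step index (values from the line above).
train_time_ms, step = 99238, 1961
print(f"{train_time_ms / step:.2f}ms")  # -> 50.61ms, as logged
```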
+[2025-09-05 15:27:59] [Rank 0] step:1981/10000 train_time:99890ms step_avg:50.42ms
+[2025-09-05 15:28:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:28:00] [Rank 0] PRINT: step:2000/10000 train_loss:0.8365 val_loss:0.8027 train_time:100775ms step_avg:50.39ms
+[2025-09-05 15:28:00] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:28:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:29:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:29:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:29:22] [Rank 0] Total Loss: 5.0865
+[2025-09-05 15:29:22] [Rank 0] Total FTA (Unweighted): 0.6819
+[2025-09-05 15:29:22] [Rank 0] Total FTA (Weighted): 0.6819
+[2025-09-05 15:29:22] [Rank 0] Group 0 Loss: 4.7842
+[2025-09-05 15:29:22] [Rank 0] Group 1 Loss: 4.7426
+[2025-09-05 15:29:22] [Rank 0] Group 2 Loss: 4.5668
+[2025-09-05 15:29:22] [Rank 0] Group 3 Loss: 4.9108
+[2025-09-05 15:29:22] [Rank 0] Group 4 Loss: 4.8389
+[2025-09-05 15:29:22] [Rank 0] Group 5 Loss: 4.8573
+[2025-09-05 15:29:22] [Rank 0] Group 6 Loss: 4.8474
+[2025-09-05 15:29:22] [Rank 0] Group 7 Loss: 4.9157
+[2025-09-05 15:29:22] [Rank 0] Group 8 Loss: 5.0462
+[2025-09-05 15:29:22] [Rank 0] Group 9 Loss: 5.0061
+[2025-09-05 15:29:22] [Rank 0] Group 10 Loss: 5.1905
+[2025-09-05 15:29:22] [Rank 0] Group 11 Loss: 5.2972
+[2025-09-05 15:29:23] [Rank 0] Group 12 Loss: 5.3708
+[2025-09-05 15:29:23] [Rank 0] Group 13 Loss: 5.5542
+[2025-09-05 15:29:23] [Rank 0] Group 14 Loss: 5.6145
+[2025-09-05 15:29:23] [Rank 0] Group 15 Loss: 5.8404
+[2025-09-05 15:29:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:29:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:29:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:29:23] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:29:23] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:29:23] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:29:23] [Rank 0] Group 6 FTA: 0.9900
+[2025-09-05 15:29:23] [Rank 0] Group 7 FTA: 0.9800
+[2025-09-05 15:29:23] [Rank 0] Group 8 FTA: 0.9600
+[2025-09-05 15:29:23] [Rank 0] Group 9 FTA: 0.8000
+[2025-09-05 15:29:23] [Rank 0] Group 10 FTA: 0.4600
+[2025-09-05 15:29:23] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-05 15:29:23] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 15:29:23] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 15:29:23] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 15:29:23] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 15:29:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:29:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:29:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:29:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:29:24] [Rank 0] step:2001/10000 train_time:100785ms step_avg:50.37ms
+[2025-09-05 15:29:25] [Rank 0] step:2021/10000 train_time:101233ms step_avg:50.09ms
+[2025-09-05 15:29:25] [Rank 0] step:2041/10000 train_time:101887ms step_avg:49.92ms
+[2025-09-05 15:29:26] [Rank 0] step:2061/10000 train_time:102540ms step_avg:49.75ms
+[2025-09-05 15:29:27] [Rank 0] step:2081/10000 train_time:103194ms step_avg:49.59ms
+[2025-09-05 15:29:27] [Rank 0] step:2101/10000 train_time:103848ms step_avg:49.43ms
+[2025-09-05 15:29:28] [Rank 0] step:2121/10000 train_time:104503ms step_avg:49.27ms
+[2025-09-05 15:29:29] [Rank 0] step:2141/10000 train_time:105156ms step_avg:49.12ms
+[2025-09-05 15:29:29] [Rank 0] step:2161/10000 train_time:105811ms step_avg:48.96ms
+[2025-09-05 15:29:30] [Rank 0] step:2181/10000 train_time:106464ms step_avg:48.81ms
+[2025-09-05 15:29:31] [Rank 0] step:2201/10000 train_time:107117ms step_avg:48.67ms
+[2025-09-05 15:29:31] [Rank 0] step:2221/10000 train_time:107770ms step_avg:48.52ms
+[2025-09-05 15:29:32] [Rank 0] step:2241/10000 train_time:108427ms step_avg:48.38ms
+[2025-09-05 15:29:33] [Rank 0] step:2261/10000 train_time:109087ms step_avg:48.25ms
+[2025-09-05 15:29:33] [Rank 0] step:2281/10000 train_time:109747ms step_avg:48.11ms
+[2025-09-05 15:29:34] [Rank 0] step:2301/10000 train_time:110407ms step_avg:47.98ms
+[2025-09-05 15:29:35] [Rank 0] step:2321/10000 train_time:111067ms step_avg:47.85ms
+[2025-09-05 15:29:35] [Rank 0] step:2341/10000 train_time:111727ms step_avg:47.73ms
+[2025-09-05 15:29:36] [Rank 0] step:2361/10000 train_time:112386ms step_avg:47.60ms
+[2025-09-05 15:29:37] [Rank 0] step:2381/10000 train_time:113046ms step_avg:47.48ms
+[2025-09-05 15:29:37] [Rank 0] step:2401/10000 train_time:113706ms step_avg:47.36ms
+[2025-09-05 15:29:38] [Rank 0] step:2421/10000 train_time:114367ms step_avg:47.24ms
+[2025-09-05 15:29:38] [Rank 0] step:2441/10000 train_time:115025ms step_avg:47.12ms
+[2025-09-05 15:29:39] [Rank 0] step:2461/10000 train_time:115685ms step_avg:47.01ms
+[2025-09-05 15:29:40] [Rank 0] step:2481/10000 train_time:116344ms step_avg:46.89ms
+[2025-09-05 15:29:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:29:41] [Rank 0] PRINT: step:2500/10000 train_loss:0.7932 val_loss:0.7637 train_time:117238ms step_avg:46.90ms
+[2025-09-05 15:29:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:29:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:31:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:31:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:31:02] [Rank 0] Total Loss: 5.0323
+[2025-09-05 15:31:02] [Rank 0] Total FTA (Unweighted): 0.7106
+[2025-09-05 15:31:02] [Rank 0] Total FTA (Weighted): 0.7106
+[2025-09-05 15:31:02] [Rank 0] Group 0 Loss: 4.8980
+[2025-09-05 15:31:02] [Rank 0] Group 1 Loss: 4.5640
+[2025-09-05 15:31:02] [Rank 0] Group 2 Loss: 4.5392
+[2025-09-05 15:31:02] [Rank 0] Group 3 Loss: 4.9249
+[2025-09-05 15:31:02] [Rank 0] Group 4 Loss: 4.8245
+[2025-09-05 15:31:02] [Rank 0] Group 5 Loss: 4.8867
+[2025-09-05 15:31:02] [Rank 0] Group 6 Loss: 4.8185
+[2025-09-05 15:31:02] [Rank 0] Group 7 Loss: 4.8616
+[2025-09-05 15:31:02] [Rank 0] Group 8 Loss: 5.0191
+[2025-09-05 15:31:03] [Rank 0] Group 9 Loss: 4.9827
+[2025-09-05 15:31:03] [Rank 0] Group 10 Loss: 5.1392
+[2025-09-05 15:31:03] [Rank 0] Group 11 Loss: 5.2088
+[2025-09-05 15:31:03] [Rank 0] Group 12 Loss: 5.2801
+[2025-09-05 15:31:03] [Rank 0] Group 13 Loss: 5.4009
+[2025-09-05 15:31:03] [Rank 0] Group 14 Loss: 5.4935
+[2025-09-05 15:31:03] [Rank 0] Group 15 Loss: 5.6753
+[2025-09-05 15:31:03] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:31:03] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:31:03] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:31:03] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:31:03] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:31:03] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:31:03] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:31:03] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 15:31:03] [Rank 0] Group 8 FTA: 0.9800
+[2025-09-05 15:31:03] [Rank 0] Group 9 FTA: 0.9300
+[2025-09-05 15:31:03] [Rank 0] Group 10 FTA: 0.7900
+[2025-09-05 15:31:03] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 15:31:03] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 15:31:03] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 15:31:03] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 15:31:03] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 15:31:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:31:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:31:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:31:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:31:04] [Rank 0] step:2501/10000 train_time:117247ms step_avg:46.88ms
+[2025-09-05 15:31:05] [Rank 0] step:2521/10000 train_time:117681ms step_avg:46.68ms
+[2025-09-05 15:31:05] [Rank 0] step:2541/10000 train_time:118340ms step_avg:46.57ms
+[2025-09-05 15:31:06] [Rank 0] step:2561/10000 train_time:119000ms step_avg:46.47ms
+[2025-09-05 15:31:07] [Rank 0] step:2581/10000 train_time:119659ms step_avg:46.36ms
+[2025-09-05 15:31:07] [Rank 0] step:2601/10000 train_time:120318ms step_avg:46.26ms
+[2025-09-05 15:31:08] [Rank 0] step:2621/10000 train_time:120977ms step_avg:46.16ms
+[2025-09-05 15:31:09] [Rank 0] step:2641/10000 train_time:121636ms step_avg:46.06ms
+[2025-09-05 15:31:09] [Rank 0] step:2661/10000 train_time:122295ms step_avg:45.96ms
+[2025-09-05 15:31:10] [Rank 0] step:2681/10000 train_time:122955ms step_avg:45.86ms
+[2025-09-05 15:31:11] [Rank 0] step:2701/10000 train_time:123614ms step_avg:45.77ms
+[2025-09-05 15:31:11] [Rank 0] step:2721/10000 train_time:124273ms step_avg:45.67ms
+[2025-09-05 15:31:12] [Rank 0] step:2741/10000 train_time:124933ms step_avg:45.58ms
+[2025-09-05 15:31:13] [Rank 0] step:2761/10000 train_time:125593ms step_avg:45.49ms
+[2025-09-05 15:31:13] [Rank 0] step:2781/10000 train_time:126407ms step_avg:45.45ms
+[2025-09-05 15:31:14] [Rank 0] step:2801/10000 train_time:127067ms step_avg:45.37ms
+[2025-09-05 15:31:15] [Rank 0] step:2821/10000 train_time:127726ms step_avg:45.28ms
+[2025-09-05 15:31:16] [Rank 0] step:2841/10000 train_time:128900ms step_avg:45.37ms
+[2025-09-05 15:31:17] [Rank 0] step:2861/10000 train_time:129559ms step_avg:45.28ms
+[2025-09-05 15:31:17] [Rank 0] step:2881/10000 train_time:130218ms step_avg:45.20ms
+[2025-09-05 15:31:18] [Rank 0] step:2901/10000 train_time:130877ms step_avg:45.11ms
+[2025-09-05 15:31:19] [Rank 0] step:2921/10000 train_time:131535ms step_avg:45.03ms
+[2025-09-05 15:31:19] [Rank 0] step:2941/10000 train_time:132194ms step_avg:44.95ms
+[2025-09-05 15:31:20] [Rank 0] step:2961/10000 train_time:132852ms step_avg:44.87ms
+[2025-09-05 15:31:21] [Rank 0] step:2981/10000 train_time:133511ms step_avg:44.79ms
+[2025-09-05 15:31:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:31:22] [Rank 0] PRINT: step:3000/10000 train_loss:0.7617 val_loss:0.7396 train_time:134404ms step_avg:44.80ms
+[2025-09-05 15:31:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:31:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:32:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:32:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:32:43] [Rank 0] Total Loss: 5.0261
+[2025-09-05 15:32:43] [Rank 0] Total FTA (Unweighted): 0.7300
+[2025-09-05 15:32:43] [Rank 0] Total FTA (Weighted): 0.7300
+[2025-09-05 15:32:43] [Rank 0] Group 0 Loss: 4.8832
+[2025-09-05 15:32:43] [Rank 0] Group 1 Loss: 4.6032
+[2025-09-05 15:32:43] [Rank 0] Group 2 Loss: 4.5055
+[2025-09-05 15:32:43] [Rank 0] Group 3 Loss: 4.9808
+[2025-09-05 15:32:43] [Rank 0] Group 4 Loss: 4.9036
+[2025-09-05 15:32:43] [Rank 0] Group 5 Loss: 4.8963
+[2025-09-05 15:32:43] [Rank 0] Group 6 Loss: 4.8396
+[2025-09-05 15:32:43] [Rank 0] Group 7 Loss: 4.8802
+[2025-09-05 15:32:43] [Rank 0] Group 8 Loss: 5.0267
+[2025-09-05 15:32:43] [Rank 0] Group 9 Loss: 4.9795
+[2025-09-05 15:32:43] [Rank 0] Group 10 Loss: 5.1221
+[2025-09-05 15:32:43] [Rank 0] Group 11 Loss: 5.1730
+[2025-09-05 15:32:43] [Rank 0] Group 12 Loss: 5.2736
+[2025-09-05 15:32:44] [Rank 0] Group 13 Loss: 5.3737
+[2025-09-05 15:32:44] [Rank 0] Group 14 Loss: 5.4055
+[2025-09-05 15:32:44] [Rank 0] Group 15 Loss: 5.5721
+[2025-09-05 15:32:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:32:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:32:44] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:32:44] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:32:44] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:32:44] [Rank 0] Group 5 FTA: 0.9700
+[2025-09-05 15:32:44] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:32:44] [Rank 0] Group 7 FTA: 0.9700
+[2025-09-05 15:32:44] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-05 15:32:44] [Rank 0] Group 9 FTA: 0.9500
+[2025-09-05 15:32:44] [Rank 0] Group 10 FTA: 0.9100
+[2025-09-05 15:32:44] [Rank 0] Group 11 FTA: 0.3900
+[2025-09-05 15:32:44] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 15:32:44] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 15:32:44] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 15:32:44] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:32:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:32:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:32:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:32:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:32:45] [Rank 0] step:3001/10000 train_time:134413ms step_avg:44.79ms
+[2025-09-05 15:32:46] [Rank 0] step:3021/10000 train_time:134866ms step_avg:44.64ms
+[2025-09-05 15:32:46] [Rank 0] step:3041/10000 train_time:135526ms step_avg:44.57ms
+[2025-09-05 15:32:47] [Rank 0] step:3061/10000 train_time:136185ms step_avg:44.49ms
+[2025-09-05 15:32:48] [Rank 0] step:3081/10000 train_time:136845ms step_avg:44.42ms
+[2025-09-05 15:32:48] [Rank 0] step:3101/10000 train_time:137505ms step_avg:44.34ms
+[2025-09-05 15:32:49] [Rank 0] step:3121/10000 train_time:138164ms step_avg:44.27ms
+[2025-09-05 15:32:50] [Rank 0] step:3141/10000 train_time:138823ms step_avg:44.20ms
+[2025-09-05 15:32:50] [Rank 0] step:3161/10000 train_time:139482ms step_avg:44.13ms
+[2025-09-05 15:32:51] [Rank 0] step:3181/10000 train_time:140141ms step_avg:44.06ms
+[2025-09-05 15:32:52] [Rank 0] step:3201/10000 train_time:140800ms step_avg:43.99ms
+[2025-09-05 15:32:52] [Rank 0] step:3221/10000 train_time:141460ms step_avg:43.92ms
+[2025-09-05 15:32:53] [Rank 0] step:3241/10000 train_time:142120ms step_avg:43.85ms
+[2025-09-05 15:32:54] [Rank 0] step:3261/10000 train_time:142779ms step_avg:43.78ms
+[2025-09-05 15:32:54] [Rank 0] step:3281/10000 train_time:143439ms step_avg:43.72ms
+[2025-09-05 15:32:55] [Rank 0] step:3301/10000 train_time:144098ms step_avg:43.65ms
+[2025-09-05 15:32:56] [Rank 0] step:3321/10000 train_time:144757ms step_avg:43.59ms
+[2025-09-05 15:32:56] [Rank 0] step:3341/10000 train_time:145416ms step_avg:43.52ms
+[2025-09-05 15:32:57] [Rank 0] step:3361/10000 train_time:146075ms step_avg:43.46ms
+[2025-09-05 15:32:58] [Rank 0] step:3381/10000 train_time:146734ms step_avg:43.40ms
+[2025-09-05 15:32:58] [Rank 0] step:3401/10000 train_time:147394ms step_avg:43.34ms
+[2025-09-05 15:32:59] [Rank 0] step:3421/10000 train_time:148053ms step_avg:43.28ms
+[2025-09-05 15:33:00] [Rank 0] step:3441/10000 train_time:148713ms step_avg:43.22ms
+[2025-09-05 15:33:00] [Rank 0] step:3461/10000 train_time:149370ms step_avg:43.16ms
+[2025-09-05 15:33:01] [Rank 0] step:3481/10000 train_time:150030ms step_avg:43.10ms
+[2025-09-05 15:33:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
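The recurring val_tokens warning above is plain integer arithmetic: 491520 / 65536 = 7.5, so validation cannot be covered by a whole number of batches. A sketch of that arithmetic follows; it is illustrative only, not the training code, and whether the loader truncates or pads the final partial batch is not shown in the log:

```python
# Why the warning fires: the validation token budget is not a whole number
# of validation batches.
val_tokens, val_batch_size = 491520, 65536
print(val_tokens / val_batch_size)            # 7.5 -> not an integer
full_batches = val_tokens // val_batch_size   # 7 complete batches
leftover = val_tokens - full_batches * val_batch_size
print(full_batches, leftover)                 # 7, 32768 tokens left over
```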
+[2025-09-05 15:33:02] [Rank 0] PRINT: step:3500/10000 train_loss:0.7391 val_loss:0.7218 train_time:150924ms step_avg:43.12ms
+[2025-09-05 15:33:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:33:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:34:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:34:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:34:23] [Rank 0] Total Loss: 5.0598
+[2025-09-05 15:34:23] [Rank 0] Total FTA (Unweighted): 0.7538
+[2025-09-05 15:34:23] [Rank 0] Total FTA (Weighted): 0.7538
+[2025-09-05 15:34:23] [Rank 0] Group 0 Loss: 4.9719
+[2025-09-05 15:34:23] [Rank 0] Group 1 Loss: 4.6201
+[2025-09-05 15:34:23] [Rank 0] Group 2 Loss: 4.5195
+[2025-09-05 15:34:23] [Rank 0] Group 3 Loss: 5.0046
+[2025-09-05 15:34:23] [Rank 0] Group 4 Loss: 4.9448
+[2025-09-05 15:34:23] [Rank 0] Group 5 Loss: 4.9560
+[2025-09-05 15:34:23] [Rank 0] Group 6 Loss: 4.8232
+[2025-09-05 15:34:23] [Rank 0] Group 7 Loss: 4.9253
+[2025-09-05 15:34:23] [Rank 0] Group 8 Loss: 5.0579
+[2025-09-05 15:34:23] [Rank 0] Group 9 Loss: 5.0343
+[2025-09-05 15:34:23] [Rank 0] Group 10 Loss: 5.1796
+[2025-09-05 15:34:23] [Rank 0] Group 11 Loss: 5.2074
+[2025-09-05 15:34:23] [Rank 0] Group 12 Loss: 5.2802
+[2025-09-05 15:34:23] [Rank 0] Group 13 Loss: 5.4175
+[2025-09-05 15:34:23] [Rank 0] Group 14 Loss: 5.4126
+[2025-09-05 15:34:23] [Rank 0] Group 15 Loss: 5.6016
+[2025-09-05 15:34:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:34:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:34:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:34:23] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:34:23] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:34:23] [Rank 0] Group 5 FTA: 0.9900
+[2025-09-05 15:34:23] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:34:23] [Rank 0] Group 7 FTA: 0.9600
+[2025-09-05 15:34:23] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:34:23] [Rank 0] Group 9 FTA: 0.9900
+[2025-09-05 15:34:23] [Rank 0] Group 10 FTA: 0.9300
+[2025-09-05 15:34:23] [Rank 0] Group 11 FTA: 0.6300
+[2025-09-05 15:34:23] [Rank 0] Group 12 FTA: 0.2400
+[2025-09-05 15:34:23] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 15:34:23] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 15:34:23] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:34:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:34:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:34:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:34:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:34:25] [Rank 0] step:3501/10000 train_time:150933ms step_avg:43.11ms
+[2025-09-05 15:34:25] [Rank 0] step:3521/10000 train_time:151564ms step_avg:43.05ms
+[2025-09-05 15:34:26] [Rank 0] step:3541/10000 train_time:152223ms step_avg:42.99ms
+[2025-09-05 15:34:27] [Rank 0] step:3561/10000 train_time:152882ms step_avg:42.93ms
+[2025-09-05 15:34:27] [Rank 0] step:3581/10000 train_time:153541ms step_avg:42.88ms
+[2025-09-05 15:34:28] [Rank 0] step:3601/10000 train_time:154199ms step_avg:42.82ms
+[2025-09-05 15:34:29] [Rank 0] step:3621/10000 train_time:154857ms step_avg:42.77ms
+[2025-09-05 15:34:29] [Rank 0] step:3641/10000 train_time:155516ms step_avg:42.71ms
+[2025-09-05 15:34:30] [Rank 0] step:3661/10000 train_time:156174ms step_avg:42.66ms
+[2025-09-05 15:34:31] [Rank 0] step:3681/10000 train_time:156832ms step_avg:42.61ms
+[2025-09-05 15:34:31] [Rank 0] step:3701/10000 train_time:157491ms step_avg:42.55ms
+[2025-09-05 15:34:32] [Rank 0] step:3721/10000 train_time:158150ms step_avg:42.50ms
+[2025-09-05 15:34:33] [Rank 0] step:3741/10000 train_time:158808ms step_avg:42.45ms
+[2025-09-05 15:34:33] [Rank 0] step:3761/10000 train_time:159467ms step_avg:42.40ms
+[2025-09-05 15:34:34] [Rank 0] step:3781/10000 train_time:160126ms step_avg:42.35ms
+[2025-09-05 15:34:35] [Rank 0] step:3801/10000 train_time:160784ms step_avg:42.30ms
+[2025-09-05 15:34:35] [Rank 0] step:3821/10000 train_time:161443ms step_avg:42.25ms
+[2025-09-05 15:34:36] [Rank 0] step:3841/10000 train_time:162101ms step_avg:42.20ms
+[2025-09-05 15:34:37] [Rank 0] step:3861/10000 train_time:162759ms step_avg:42.15ms
+[2025-09-05 15:34:37] [Rank 0] step:3881/10000 train_time:163417ms step_avg:42.11ms
+[2025-09-05 15:34:38] [Rank 0] step:3901/10000 train_time:164075ms step_avg:42.06ms
+[2025-09-05 15:34:39] [Rank 0] step:3921/10000 train_time:164734ms step_avg:42.01ms
+[2025-09-05 15:34:39] [Rank 0] step:3941/10000 train_time:165393ms step_avg:41.97ms
+[2025-09-05 15:34:40] [Rank 0] step:3961/10000 train_time:166051ms step_avg:41.92ms
+[2025-09-05 15:34:41] [Rank 0] step:3981/10000 train_time:166709ms step_avg:41.88ms
+[2025-09-05 15:34:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:34:42] [Rank 0] PRINT: step:4000/10000 train_loss:0.7230 val_loss:0.7088 train_time:167602ms step_avg:41.90ms
+[2025-09-05 15:34:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:34:42] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:36:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:36:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:36:03] [Rank 0] Total Loss: 5.0695
+[2025-09-05 15:36:03] [Rank 0] Total FTA (Unweighted): 0.7769
+[2025-09-05 15:36:03] [Rank 0] Total FTA (Weighted): 0.7769
+[2025-09-05 15:36:03] [Rank 0] Group 0 Loss: 5.0705
+[2025-09-05 15:36:03] [Rank 0] Group 1 Loss: 4.7230
+[2025-09-05 15:36:03] [Rank 0] Group 2 Loss: 4.6432
+[2025-09-05 15:36:03] [Rank 0] Group 3 Loss: 5.0235
+[2025-09-05 15:36:03] [Rank 0] Group 4 Loss: 4.9704
+[2025-09-05 15:36:04] [Rank 0] Group 5 Loss: 4.8984
+[2025-09-05 15:36:04] [Rank 0] Group 6 Loss: 4.8519
+[2025-09-05 15:36:04] [Rank 0] Group 7 Loss: 4.9462
+[2025-09-05 15:36:04] [Rank 0] Group 8 Loss: 5.0580
+[2025-09-05 15:36:04] [Rank 0] Group 9 Loss: 5.0536
+[2025-09-05 15:36:04] [Rank 0] Group 10 Loss: 5.1481
+[2025-09-05 15:36:04] [Rank 0] Group 11 Loss: 5.2092
+[2025-09-05 15:36:04] [Rank 0] Group 12 Loss: 5.2590
+[2025-09-05 15:36:04] [Rank 0] Group 13 Loss: 5.3898
+[2025-09-05 15:36:04] [Rank 0] Group 14 Loss: 5.3383
+[2025-09-05 15:36:04] [Rank 0] Group 15 Loss: 5.5288
+[2025-09-05 15:36:04] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:36:04] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:36:04] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:36:04] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:36:04] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:36:04] [Rank 0] Group 5 FTA: 0.9900
+[2025-09-05 15:36:04] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:36:04] [Rank 0] Group 7 FTA: 0.9800
+[2025-09-05 15:36:04] [Rank 0] Group 8 FTA: 0.9800
+[2025-09-05 15:36:04] [Rank 0] Group 9 FTA: 0.9600
+[2025-09-05 15:36:04] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 15:36:04] [Rank 0] Group 11 FTA: 0.7700
+[2025-09-05 15:36:04] [Rank 0] Group 12 FTA: 0.3500
+[2025-09-05 15:36:04] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 15:36:04] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 15:36:04] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 15:36:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:36:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:36:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:36:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:36:05] [Rank 0] step:4001/10000 train_time:167611ms step_avg:41.89ms
+[2025-09-05 15:36:06] [Rank 0] step:4021/10000 train_time:168053ms step_avg:41.79ms
+[2025-09-05 15:36:06] [Rank 0] step:4041/10000 train_time:168712ms step_avg:41.75ms
+[2025-09-05 15:36:07] [Rank 0] step:4061/10000 train_time:169373ms step_avg:41.71ms
+[2025-09-05 15:36:08] [Rank 0] step:4081/10000 train_time:170032ms step_avg:41.66ms
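To read curves such as group-11 FTA (0.09 → 0.17 → 0.33 → 0.39 → 0.63 → 0.77 across the evaluations in this section) back out of a raw log in this format, a small parser along the following lines should suffice. The filename and regexes are assumptions for illustration, not part of the original scripts:

```python
# Hypothetical log parser for this log format (illustrative sketch).
import re

VAL_RE = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")
FTA_RE = re.compile(r"Group (\d+) FTA: ([\d.]+)")

val_curve, group_fta = [], {}
with open("training_log.txt") as f:                  # assumed filename
    for line in f:
        if m := VAL_RE.search(line):                 # periodic val summaries
            val_curve.append((int(m.group(1)), float(m.group(3))))
        elif m := FTA_RE.search(line):               # per-group detailed evals
            group_fta.setdefault(int(m.group(1)), []).append(float(m.group(2)))

print(val_curve[-1])      # e.g. (4000, 0.7088) if run over the section above
print(group_fta.get(11))  # group-11 FTA across evals: 0.09, 0.17, 0.33, ...
```

If the same file is processed twice (as in the duplicated raw dump), each point would simply appear twice; deduplicating consecutive identical lines first keeps the curves clean.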
+[2025-09-05 15:36:08] [Rank 0] step:4101/10000 train_time:170690ms step_avg:41.62ms
+[2025-09-05 15:36:09] [Rank 0] step:4121/10000 train_time:171349ms step_avg:41.58ms
+[2025-09-05 15:36:10] [Rank 0] step:4141/10000 train_time:172009ms step_avg:41.54ms
+[2025-09-05 15:36:10] [Rank 0] step:4161/10000 train_time:172671ms step_avg:41.50ms
+[2025-09-05 15:36:11] [Rank 0] step:4181/10000 train_time:173331ms step_avg:41.46ms
+[2025-09-05 15:36:12] [Rank 0] step:4201/10000 train_time:173990ms step_avg:41.42ms
+[2025-09-05 15:36:12] [Rank 0] step:4221/10000 train_time:174649ms step_avg:41.38ms
+[2025-09-05 15:36:13] [Rank 0] step:4241/10000 train_time:175309ms step_avg:41.34ms
+[2025-09-05 15:36:14] [Rank 0] step:4261/10000 train_time:175968ms step_avg:41.30ms
+[2025-09-05 15:36:14] [Rank 0] step:4281/10000 train_time:176627ms step_avg:41.26ms
+[2025-09-05 15:36:15] [Rank 0] step:4301/10000 train_time:177287ms step_avg:41.22ms
+[2025-09-05 15:36:16] [Rank 0] step:4321/10000 train_time:177946ms step_avg:41.18ms
+[2025-09-05 15:36:16] [Rank 0] step:4341/10000 train_time:178606ms step_avg:41.14ms
+[2025-09-05 15:36:17] [Rank 0] step:4361/10000 train_time:179266ms step_avg:41.11ms
+[2025-09-05 15:36:18] [Rank 0] step:4381/10000 train_time:179925ms step_avg:41.07ms
+[2025-09-05 15:36:18] [Rank 0] step:4401/10000 train_time:180584ms step_avg:41.03ms
+[2025-09-05 15:36:19] [Rank 0] step:4421/10000 train_time:181244ms step_avg:41.00ms
+[2025-09-05 15:36:20] [Rank 0] step:4441/10000 train_time:181904ms step_avg:40.96ms
+[2025-09-05 15:36:20] [Rank 0] step:4461/10000 train_time:182564ms step_avg:40.92ms
+[2025-09-05 15:36:21] [Rank 0] step:4481/10000 train_time:183224ms step_avg:40.89ms
+[2025-09-05 15:36:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:36:22] [Rank 0] PRINT: step:4500/10000 train_loss:0.7112 val_loss:0.6980 train_time:184117ms step_avg:40.91ms
+[2025-09-05 15:36:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:36:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:37:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:37:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:37:43] [Rank 0] Total Loss: 5.0264
+[2025-09-05 15:37:43] [Rank 0] Total FTA (Unweighted): 0.7913
+[2025-09-05 15:37:43] [Rank 0] Total FTA (Weighted): 0.7913
+[2025-09-05 15:37:43] [Rank 0] Group 0 Loss: 4.9812
+[2025-09-05 15:37:43] [Rank 0] Group 1 Loss: 4.6441
+[2025-09-05 15:37:43] [Rank 0] Group 2 Loss: 4.5449
+[2025-09-05 15:37:43] [Rank 0] Group 3 Loss: 4.9801
+[2025-09-05 15:37:43] [Rank 0] Group 4 Loss: 4.9487
+[2025-09-05 15:37:43] [Rank 0] Group 5 Loss: 4.9359
+[2025-09-05 15:37:43] [Rank 0] Group 6 Loss: 4.8273
+[2025-09-05 15:37:43] [Rank 0] Group 7 Loss: 4.9130
+[2025-09-05 15:37:43] [Rank 0] Group 8 Loss: 5.0081
+[2025-09-05 15:37:43] [Rank 0] Group 9 Loss: 5.0053
+[2025-09-05 15:37:43] [Rank 0] Group 10 Loss: 5.1128
+[2025-09-05 15:37:43] [Rank 0] Group 11 Loss: 5.1351
+[2025-09-05 15:37:43] [Rank 0] Group 12 Loss: 5.1916
+[2025-09-05 15:37:43] [Rank 0] Group 13 Loss: 5.3691
+[2025-09-05 15:37:43] [Rank 0] Group 14 Loss: 5.3041
+[2025-09-05 15:37:43] [Rank 0] Group 15 Loss: 5.5213
+[2025-09-05 15:37:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:37:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:37:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:37:43] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:37:43] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:37:43] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:37:43] [Rank 0] Group 6 FTA: 0.9900
+[2025-09-05 15:37:43] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 15:37:43] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:37:43] [Rank 0] Group 9 FTA: 0.9700
+[2025-09-05 15:37:43] [Rank 0] Group 10 FTA: 0.9700
+[2025-09-05 15:37:43] [Rank 0] Group 11 FTA: 0.9000
+[2025-09-05 15:37:43] [Rank 0] Group 12 FTA: 0.4300
+[2025-09-05 15:37:43] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 15:37:43] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 15:37:43] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:37:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:37:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:37:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:37:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:37:45] [Rank 0] step:4501/10000 train_time:184126ms step_avg:40.91ms
+[2025-09-05 15:37:45] [Rank 0] step:4521/10000 train_time:184558ms step_avg:40.82ms
+[2025-09-05 15:37:46] [Rank 0] step:4541/10000 train_time:185217ms step_avg:40.79ms
+[2025-09-05 15:37:47] [Rank 0] step:4561/10000 train_time:185875ms step_avg:40.75ms
+[2025-09-05 15:37:47] [Rank 0] step:4581/10000 train_time:186534ms step_avg:40.72ms
+[2025-09-05 15:37:48] [Rank 0] step:4601/10000 train_time:187193ms step_avg:40.69ms
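In the step-4500 results above, the unweighted and weighted total FTA agree (0.7913), which is what you would expect if the 1600 fixed-eval samples are split evenly across the 16 groups (100 each): a sample-weighted mean with equal weights reduces to the plain mean of the per-group values. A small sketch under that equal-split assumption:

    per_group_fta = [1.0] * 6 + [0.99, 0.99, 1.00, 0.97, 0.97, 0.90,
                                 0.43, 0.21, 0.12, 0.08]  # groups 0..15 at step 4500
    unweighted = sum(per_group_fta) / len(per_group_fta)
    weighted = sum(f * 100 for f in per_group_fta) / 1600  # assumes 100 samples per group
    assert abs(unweighted - weighted) < 1e-9
    assert abs(unweighted - 0.7913) < 1e-3  # matches the logged totals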
15:37:48] [Rank 0] step:4601/10000 train_time:187193ms step_avg:40.69ms +[2025-09-05 15:37:49] [Rank 0] step:4621/10000 train_time:187853ms step_avg:40.65ms +[2025-09-05 15:37:49] [Rank 0] step:4621/10000 train_time:187853ms step_avg:40.65ms +[2025-09-05 15:37:49] [Rank 0] step:4641/10000 train_time:188512ms step_avg:40.62ms +[2025-09-05 15:37:49] [Rank 0] step:4641/10000 train_time:188512ms step_avg:40.62ms +[2025-09-05 15:37:50] [Rank 0] step:4661/10000 train_time:189170ms step_avg:40.59ms +[2025-09-05 15:37:50] [Rank 0] step:4661/10000 train_time:189170ms step_avg:40.59ms +[2025-09-05 15:37:51] [Rank 0] step:4681/10000 train_time:189829ms step_avg:40.55ms +[2025-09-05 15:37:51] [Rank 0] step:4681/10000 train_time:189829ms step_avg:40.55ms +[2025-09-05 15:37:51] [Rank 0] step:4701/10000 train_time:190488ms step_avg:40.52ms +[2025-09-05 15:37:51] [Rank 0] step:4701/10000 train_time:190488ms step_avg:40.52ms +[2025-09-05 15:37:52] [Rank 0] step:4721/10000 train_time:191147ms step_avg:40.49ms +[2025-09-05 15:37:52] [Rank 0] step:4721/10000 train_time:191147ms step_avg:40.49ms +[2025-09-05 15:37:53] [Rank 0] step:4741/10000 train_time:191806ms step_avg:40.46ms +[2025-09-05 15:37:53] [Rank 0] step:4741/10000 train_time:191806ms step_avg:40.46ms +[2025-09-05 15:37:53] [Rank 0] step:4761/10000 train_time:192465ms step_avg:40.43ms +[2025-09-05 15:37:53] [Rank 0] step:4761/10000 train_time:192465ms step_avg:40.43ms +[2025-09-05 15:37:54] [Rank 0] step:4781/10000 train_time:193123ms step_avg:40.39ms +[2025-09-05 15:37:54] [Rank 0] step:4781/10000 train_time:193123ms step_avg:40.39ms +[2025-09-05 15:37:55] [Rank 0] step:4801/10000 train_time:193781ms step_avg:40.36ms +[2025-09-05 15:37:55] [Rank 0] step:4801/10000 train_time:193781ms step_avg:40.36ms +[2025-09-05 15:37:55] [Rank 0] step:4821/10000 train_time:194443ms step_avg:40.33ms +[2025-09-05 15:37:55] [Rank 0] step:4821/10000 train_time:194443ms step_avg:40.33ms +[2025-09-05 15:37:56] [Rank 0] step:4841/10000 train_time:195409ms step_avg:40.37ms +[2025-09-05 15:37:56] [Rank 0] step:4841/10000 train_time:195409ms step_avg:40.37ms +[2025-09-05 15:37:57] [Rank 0] step:4861/10000 train_time:196068ms step_avg:40.33ms +[2025-09-05 15:37:57] [Rank 0] step:4861/10000 train_time:196068ms step_avg:40.33ms +[2025-09-05 15:37:57] [Rank 0] step:4881/10000 train_time:196728ms step_avg:40.30ms +[2025-09-05 15:37:57] [Rank 0] step:4881/10000 train_time:196728ms step_avg:40.30ms +[2025-09-05 15:37:58] [Rank 0] step:4901/10000 train_time:197386ms step_avg:40.27ms +[2025-09-05 15:37:58] [Rank 0] step:4901/10000 train_time:197386ms step_avg:40.27ms +[2025-09-05 15:37:59] [Rank 0] step:4921/10000 train_time:198045ms step_avg:40.24ms +[2025-09-05 15:37:59] [Rank 0] step:4921/10000 train_time:198045ms step_avg:40.24ms +[2025-09-05 15:37:59] [Rank 0] step:4941/10000 train_time:198703ms step_avg:40.22ms +[2025-09-05 15:37:59] [Rank 0] step:4941/10000 train_time:198703ms step_avg:40.22ms +[2025-09-05 15:38:00] [Rank 0] step:4961/10000 train_time:199362ms step_avg:40.19ms +[2025-09-05 15:38:00] [Rank 0] step:4961/10000 train_time:199362ms step_avg:40.19ms +[2025-09-05 15:38:01] [Rank 0] step:4981/10000 train_time:200022ms step_avg:40.16ms +[2025-09-05 15:38:01] [Rank 0] step:4981/10000 train_time:200022ms step_avg:40.16ms +[2025-09-05 15:38:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:38:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:38:02] [Rank 0] PRINT: step:5000/10000 train_loss:0.7011 val_loss:0.6890 train_time:200917ms step_avg:40.18ms +[2025-09-05 15:38:02] [Rank 0] PRINT: step:5000/10000 train_loss:0.7011 val_loss:0.6890 train_time:200917ms step_avg:40.18ms +[2025-09-05 15:38:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:38:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:38:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:38:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:39:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:39:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:39:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:39:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:39:24] [Rank 0] Total Loss: 5.0305 +[2025-09-05 15:39:24] [Rank 0] Total Loss: 5.0305 +[2025-09-05 15:39:24] [Rank 0] Total FTA (Unweighted): 0.8069 +[2025-09-05 15:39:24] [Rank 0] Total FTA (Unweighted): 0.8069 +[2025-09-05 15:39:24] [Rank 0] Total FTA (Weighted): 0.8069 +[2025-09-05 15:39:24] [Rank 0] Total FTA (Weighted): 0.8069 +[2025-09-05 15:39:24] [Rank 0] Group 0 Loss: 5.0620 +[2025-09-05 15:39:24] [Rank 0] Group 0 Loss: 5.0620 +[2025-09-05 15:39:24] [Rank 0] Group 1 Loss: 4.6369 +[2025-09-05 15:39:24] [Rank 0] Group 1 Loss: 4.6369 +[2025-09-05 15:39:24] [Rank 0] Group 2 Loss: 4.6449 +[2025-09-05 15:39:24] [Rank 0] Group 2 Loss: 4.6449 +[2025-09-05 15:39:24] [Rank 0] Group 3 Loss: 4.9798 +[2025-09-05 15:39:24] [Rank 0] Group 3 Loss: 4.9798 +[2025-09-05 15:39:24] [Rank 0] Group 4 Loss: 4.9628 +[2025-09-05 15:39:24] [Rank 0] Group 4 Loss: 4.9628 +[2025-09-05 15:39:24] [Rank 0] Group 5 Loss: 4.9271 +[2025-09-05 15:39:24] [Rank 0] Group 5 Loss: 4.9271 +[2025-09-05 15:39:24] [Rank 0] Group 6 Loss: 4.8444 +[2025-09-05 15:39:24] [Rank 0] Group 6 Loss: 4.8444 +[2025-09-05 15:39:24] [Rank 0] Group 7 Loss: 4.9243 +[2025-09-05 15:39:24] [Rank 0] Group 7 Loss: 4.9243 +[2025-09-05 15:39:24] [Rank 0] Group 8 Loss: 5.0398 +[2025-09-05 15:39:24] [Rank 0] Group 8 Loss: 5.0398 +[2025-09-05 15:39:24] [Rank 0] Group 9 Loss: 5.0092 +[2025-09-05 15:39:24] [Rank 0] Group 9 Loss: 5.0092 +[2025-09-05 15:39:24] [Rank 0] Group 10 Loss: 5.0975 +[2025-09-05 15:39:24] [Rank 0] Group 10 Loss: 5.0975 +[2025-09-05 15:39:24] [Rank 0] Group 11 Loss: 5.0936 +[2025-09-05 15:39:24] [Rank 0] Group 11 Loss: 5.0936 +[2025-09-05 15:39:24] [Rank 0] Group 12 Loss: 5.1808 +[2025-09-05 15:39:24] [Rank 0] Group 12 Loss: 5.1808 +[2025-09-05 15:39:24] [Rank 0] Group 13 Loss: 5.3352 +[2025-09-05 15:39:24] [Rank 0] Group 13 Loss: 5.3352 +[2025-09-05 15:39:24] [Rank 0] Group 14 Loss: 5.2906 +[2025-09-05 15:39:24] [Rank 0] Group 14 Loss: 5.2906 +[2025-09-05 15:39:24] [Rank 0] Group 15 Loss: 5.4596 +[2025-09-05 15:39:24] [Rank 0] Group 15 Loss: 5.4596 +[2025-09-05 15:39:24] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 15:39:24] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:39:24] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 15:39:24] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 15:39:24] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 15:39:24] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 15:39:24] [Rank 0] Group 9 FTA: 0.9300 +[2025-09-05 15:39:24] [Rank 0] Group 9 FTA: 0.9300 +[2025-09-05 15:39:24] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 15:39:24] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 15:39:24] [Rank 0] Group 11 FTA: 0.9400 +[2025-09-05 15:39:24] [Rank 0] Group 11 FTA: 0.9400 +[2025-09-05 15:39:24] [Rank 0] Group 12 FTA: 0.6000 +[2025-09-05 15:39:24] [Rank 0] Group 12 FTA: 0.6000 +[2025-09-05 15:39:24] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 15:39:24] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 15:39:24] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 15:39:24] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 15:39:24] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:39:24] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:39:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:39:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:39:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:39:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:39:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:39:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:39:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:39:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:39:26] [Rank 0] step:5001/10000 train_time:200926ms step_avg:40.18ms +[2025-09-05 15:39:26] [Rank 0] step:5001/10000 train_time:200926ms step_avg:40.18ms +[2025-09-05 15:39:26] [Rank 0] step:5021/10000 train_time:201380ms step_avg:40.11ms +[2025-09-05 15:39:26] [Rank 0] step:5021/10000 train_time:201380ms step_avg:40.11ms +[2025-09-05 15:39:27] [Rank 0] step:5041/10000 train_time:202039ms step_avg:40.08ms +[2025-09-05 15:39:27] [Rank 0] step:5041/10000 train_time:202039ms step_avg:40.08ms +[2025-09-05 15:39:28] [Rank 0] step:5061/10000 train_time:202699ms step_avg:40.05ms +[2025-09-05 15:39:28] [Rank 0] step:5061/10000 train_time:202699ms step_avg:40.05ms +[2025-09-05 15:39:28] [Rank 0] step:5081/10000 train_time:203358ms step_avg:40.02ms +[2025-09-05 15:39:28] [Rank 0] step:5081/10000 train_time:203358ms step_avg:40.02ms +[2025-09-05 15:39:29] [Rank 0] step:5101/10000 train_time:204017ms step_avg:40.00ms +[2025-09-05 
15:39:29] [Rank 0] step:5101/10000 train_time:204017ms step_avg:40.00ms +[2025-09-05 15:39:30] [Rank 0] step:5121/10000 train_time:204676ms step_avg:39.97ms +[2025-09-05 15:39:30] [Rank 0] step:5121/10000 train_time:204676ms step_avg:39.97ms +[2025-09-05 15:39:30] [Rank 0] step:5141/10000 train_time:205336ms step_avg:39.94ms +[2025-09-05 15:39:30] [Rank 0] step:5141/10000 train_time:205336ms step_avg:39.94ms +[2025-09-05 15:39:31] [Rank 0] step:5161/10000 train_time:205995ms step_avg:39.91ms +[2025-09-05 15:39:31] [Rank 0] step:5161/10000 train_time:205995ms step_avg:39.91ms +[2025-09-05 15:39:32] [Rank 0] step:5181/10000 train_time:206655ms step_avg:39.89ms +[2025-09-05 15:39:32] [Rank 0] step:5181/10000 train_time:206655ms step_avg:39.89ms +[2025-09-05 15:39:32] [Rank 0] step:5201/10000 train_time:207314ms step_avg:39.86ms +[2025-09-05 15:39:32] [Rank 0] step:5201/10000 train_time:207314ms step_avg:39.86ms +[2025-09-05 15:39:33] [Rank 0] step:5221/10000 train_time:207973ms step_avg:39.83ms +[2025-09-05 15:39:33] [Rank 0] step:5221/10000 train_time:207973ms step_avg:39.83ms +[2025-09-05 15:39:34] [Rank 0] step:5241/10000 train_time:208633ms step_avg:39.81ms +[2025-09-05 15:39:34] [Rank 0] step:5241/10000 train_time:208633ms step_avg:39.81ms +[2025-09-05 15:39:34] [Rank 0] step:5261/10000 train_time:209292ms step_avg:39.78ms +[2025-09-05 15:39:34] [Rank 0] step:5261/10000 train_time:209292ms step_avg:39.78ms +[2025-09-05 15:39:35] [Rank 0] step:5281/10000 train_time:209952ms step_avg:39.76ms +[2025-09-05 15:39:35] [Rank 0] step:5281/10000 train_time:209952ms step_avg:39.76ms +[2025-09-05 15:39:36] [Rank 0] step:5301/10000 train_time:210611ms step_avg:39.73ms +[2025-09-05 15:39:36] [Rank 0] step:5301/10000 train_time:210611ms step_avg:39.73ms +[2025-09-05 15:39:36] [Rank 0] step:5321/10000 train_time:211271ms step_avg:39.71ms +[2025-09-05 15:39:36] [Rank 0] step:5321/10000 train_time:211271ms step_avg:39.71ms +[2025-09-05 15:39:37] [Rank 0] step:5341/10000 train_time:211931ms step_avg:39.68ms +[2025-09-05 15:39:37] [Rank 0] step:5341/10000 train_time:211931ms step_avg:39.68ms +[2025-09-05 15:39:37] [Rank 0] step:5361/10000 train_time:212590ms step_avg:39.65ms +[2025-09-05 15:39:37] [Rank 0] step:5361/10000 train_time:212590ms step_avg:39.65ms +[2025-09-05 15:39:38] [Rank 0] step:5381/10000 train_time:213250ms step_avg:39.63ms +[2025-09-05 15:39:38] [Rank 0] step:5381/10000 train_time:213250ms step_avg:39.63ms +[2025-09-05 15:39:39] [Rank 0] step:5401/10000 train_time:213909ms step_avg:39.61ms +[2025-09-05 15:39:39] [Rank 0] step:5401/10000 train_time:213909ms step_avg:39.61ms +[2025-09-05 15:39:40] [Rank 0] step:5421/10000 train_time:214741ms step_avg:39.61ms +[2025-09-05 15:39:40] [Rank 0] step:5421/10000 train_time:214741ms step_avg:39.61ms +[2025-09-05 15:39:40] [Rank 0] step:5441/10000 train_time:215400ms step_avg:39.59ms +[2025-09-05 15:39:40] [Rank 0] step:5441/10000 train_time:215400ms step_avg:39.59ms +[2025-09-05 15:39:41] [Rank 0] step:5461/10000 train_time:216058ms step_avg:39.56ms +[2025-09-05 15:39:41] [Rank 0] step:5461/10000 train_time:216058ms step_avg:39.56ms +[2025-09-05 15:39:42] [Rank 0] step:5481/10000 train_time:216717ms step_avg:39.54ms +[2025-09-05 15:39:42] [Rank 0] step:5481/10000 train_time:216717ms step_avg:39.54ms +[2025-09-05 15:39:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:39:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:39:43] [Rank 0] PRINT: step:5500/10000 train_loss:0.6925 val_loss:0.6810 train_time:217766ms step_avg:39.59ms +[2025-09-05 15:39:43] [Rank 0] PRINT: step:5500/10000 train_loss:0.6925 val_loss:0.6810 train_time:217766ms step_avg:39.59ms +[2025-09-05 15:39:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:39:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:39:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:39:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:41:04] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:41:04] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:41:04] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:41:04] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:41:04] [Rank 0] Total Loss: 5.0262 +[2025-09-05 15:41:04] [Rank 0] Total Loss: 5.0262 +[2025-09-05 15:41:04] [Rank 0] Total FTA (Unweighted): 0.8144 +[2025-09-05 15:41:04] [Rank 0] Total FTA (Unweighted): 0.8144 +[2025-09-05 15:41:04] [Rank 0] Total FTA (Weighted): 0.8144 +[2025-09-05 15:41:04] [Rank 0] Total FTA (Weighted): 0.8144 +[2025-09-05 15:41:04] [Rank 0] Group 0 Loss: 5.1133 +[2025-09-05 15:41:04] [Rank 0] Group 0 Loss: 5.1133 +[2025-09-05 15:41:04] [Rank 0] Group 1 Loss: 4.5740 +[2025-09-05 15:41:04] [Rank 0] Group 1 Loss: 4.5740 +[2025-09-05 15:41:04] [Rank 0] Group 2 Loss: 4.6404 +[2025-09-05 15:41:04] [Rank 0] Group 2 Loss: 4.6404 +[2025-09-05 15:41:04] [Rank 0] Group 3 Loss: 4.9607 +[2025-09-05 15:41:04] [Rank 0] Group 3 Loss: 4.9607 +[2025-09-05 15:41:04] [Rank 0] Group 4 Loss: 5.0579 +[2025-09-05 15:41:04] [Rank 0] Group 4 Loss: 5.0579 +[2025-09-05 15:41:04] [Rank 0] Group 5 Loss: 4.9435 +[2025-09-05 15:41:04] [Rank 0] Group 5 Loss: 4.9435 +[2025-09-05 15:41:04] [Rank 0] Group 6 Loss: 4.8162 +[2025-09-05 15:41:04] [Rank 0] Group 6 Loss: 4.8162 +[2025-09-05 15:41:04] [Rank 0] Group 7 Loss: 4.9347 +[2025-09-05 15:41:04] [Rank 0] Group 7 Loss: 4.9347 +[2025-09-05 15:41:04] [Rank 0] Group 8 Loss: 5.0131 +[2025-09-05 15:41:04] [Rank 0] Group 8 Loss: 5.0131 +[2025-09-05 15:41:04] [Rank 0] Group 9 Loss: 5.0150 +[2025-09-05 15:41:04] [Rank 0] Group 9 Loss: 5.0150 +[2025-09-05 15:41:04] [Rank 0] Group 10 Loss: 5.0986 +[2025-09-05 15:41:04] [Rank 0] Group 10 Loss: 5.0986 +[2025-09-05 15:41:04] [Rank 0] Group 11 Loss: 5.0989 +[2025-09-05 15:41:04] [Rank 0] Group 11 Loss: 5.0989 +[2025-09-05 15:41:04] [Rank 0] Group 12 Loss: 5.1580 +[2025-09-05 15:41:04] [Rank 0] Group 12 Loss: 5.1580 +[2025-09-05 15:41:04] [Rank 0] Group 13 Loss: 5.3148 +[2025-09-05 15:41:04] [Rank 0] Group 13 Loss: 5.3148 +[2025-09-05 15:41:04] [Rank 0] Group 14 Loss: 5.2579 +[2025-09-05 15:41:04] [Rank 0] Group 14 Loss: 5.2579 +[2025-09-05 15:41:04] [Rank 0] Group 15 Loss: 5.4225 +[2025-09-05 15:41:04] [Rank 0] Group 15 Loss: 5.4225 +[2025-09-05 15:41:04] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:41:04] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:41:04] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:41:04] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:41:04] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:41:04] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:41:04] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:41:04] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:41:04] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 15:41:04] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:41:05] [Rank 0] Group 5 FTA: 0.9700 +[2025-09-05 15:41:05] [Rank 0] Group 5 FTA: 0.9700 +[2025-09-05 15:41:05] [Rank 0] Group 6 FTA: 0.9600 +[2025-09-05 15:41:05] [Rank 0] Group 6 FTA: 0.9600 +[2025-09-05 15:41:05] [Rank 0] Group 7 FTA: 0.9800 +[2025-09-05 15:41:05] [Rank 0] Group 7 FTA: 0.9800 +[2025-09-05 15:41:05] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 15:41:05] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 15:41:05] [Rank 0] Group 9 FTA: 0.9500 +[2025-09-05 15:41:05] [Rank 0] Group 9 FTA: 0.9500 +[2025-09-05 15:41:05] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 15:41:05] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 15:41:05] [Rank 0] Group 11 FTA: 0.9400 +[2025-09-05 15:41:05] [Rank 0] Group 11 FTA: 0.9400 +[2025-09-05 15:41:05] [Rank 0] Group 12 FTA: 0.7500 +[2025-09-05 15:41:05] [Rank 0] Group 12 FTA: 0.7500 +[2025-09-05 15:41:05] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 15:41:05] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 15:41:05] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 15:41:05] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 15:41:05] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-05 15:41:05] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-05 15:41:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:41:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:41:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:41:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:41:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:41:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:41:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:41:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:41:06] [Rank 0] step:5501/10000 train_time:217775ms step_avg:39.59ms +[2025-09-05 15:41:06] [Rank 0] step:5501/10000 train_time:217775ms step_avg:39.59ms +[2025-09-05 15:41:07] [Rank 0] step:5521/10000 train_time:218224ms step_avg:39.53ms +[2025-09-05 15:41:07] [Rank 0] step:5521/10000 train_time:218224ms step_avg:39.53ms +[2025-09-05 15:41:07] [Rank 0] step:5541/10000 train_time:218882ms step_avg:39.50ms +[2025-09-05 15:41:07] [Rank 0] step:5541/10000 train_time:218882ms step_avg:39.50ms +[2025-09-05 15:41:08] [Rank 0] step:5561/10000 train_time:219541ms step_avg:39.48ms +[2025-09-05 15:41:08] [Rank 0] step:5561/10000 train_time:219541ms step_avg:39.48ms +[2025-09-05 15:41:09] [Rank 0] step:5581/10000 train_time:220201ms step_avg:39.46ms +[2025-09-05 15:41:09] [Rank 0] step:5581/10000 train_time:220201ms step_avg:39.46ms +[2025-09-05 15:41:09] [Rank 0] step:5601/10000 train_time:220861ms step_avg:39.43ms +[2025-09-05 
15:41:09] [Rank 0] step:5601/10000 train_time:220861ms step_avg:39.43ms +[2025-09-05 15:41:10] [Rank 0] step:5621/10000 train_time:221519ms step_avg:39.41ms +[2025-09-05 15:41:10] [Rank 0] step:5621/10000 train_time:221519ms step_avg:39.41ms +[2025-09-05 15:41:11] [Rank 0] step:5641/10000 train_time:222713ms step_avg:39.48ms +[2025-09-05 15:41:11] [Rank 0] step:5641/10000 train_time:222713ms step_avg:39.48ms +[2025-09-05 15:41:12] [Rank 0] step:5661/10000 train_time:223288ms step_avg:39.44ms +[2025-09-05 15:41:12] [Rank 0] step:5661/10000 train_time:223288ms step_avg:39.44ms +[2025-09-05 15:41:12] [Rank 0] step:5681/10000 train_time:223947ms step_avg:39.42ms +[2025-09-05 15:41:12] [Rank 0] step:5681/10000 train_time:223947ms step_avg:39.42ms +[2025-09-05 15:41:13] [Rank 0] step:5701/10000 train_time:224605ms step_avg:39.40ms +[2025-09-05 15:41:13] [Rank 0] step:5701/10000 train_time:224605ms step_avg:39.40ms +[2025-09-05 15:41:14] [Rank 0] step:5721/10000 train_time:225264ms step_avg:39.37ms +[2025-09-05 15:41:14] [Rank 0] step:5721/10000 train_time:225264ms step_avg:39.37ms +[2025-09-05 15:41:14] [Rank 0] step:5741/10000 train_time:225922ms step_avg:39.35ms +[2025-09-05 15:41:14] [Rank 0] step:5741/10000 train_time:225922ms step_avg:39.35ms +[2025-09-05 15:41:15] [Rank 0] step:5761/10000 train_time:226581ms step_avg:39.33ms +[2025-09-05 15:41:15] [Rank 0] step:5761/10000 train_time:226581ms step_avg:39.33ms +[2025-09-05 15:41:16] [Rank 0] step:5781/10000 train_time:227240ms step_avg:39.31ms +[2025-09-05 15:41:16] [Rank 0] step:5781/10000 train_time:227240ms step_avg:39.31ms +[2025-09-05 15:41:16] [Rank 0] step:5801/10000 train_time:227898ms step_avg:39.29ms +[2025-09-05 15:41:16] [Rank 0] step:5801/10000 train_time:227898ms step_avg:39.29ms +[2025-09-05 15:41:17] [Rank 0] step:5821/10000 train_time:228558ms step_avg:39.26ms +[2025-09-05 15:41:17] [Rank 0] step:5821/10000 train_time:228558ms step_avg:39.26ms +[2025-09-05 15:41:18] [Rank 0] step:5841/10000 train_time:229217ms step_avg:39.24ms +[2025-09-05 15:41:18] [Rank 0] step:5841/10000 train_time:229217ms step_avg:39.24ms +[2025-09-05 15:41:18] [Rank 0] step:5861/10000 train_time:229876ms step_avg:39.22ms +[2025-09-05 15:41:18] [Rank 0] step:5861/10000 train_time:229876ms step_avg:39.22ms +[2025-09-05 15:41:19] [Rank 0] step:5881/10000 train_time:230579ms step_avg:39.21ms +[2025-09-05 15:41:19] [Rank 0] step:5881/10000 train_time:230579ms step_avg:39.21ms +[2025-09-05 15:41:20] [Rank 0] step:5901/10000 train_time:231238ms step_avg:39.19ms +[2025-09-05 15:41:20] [Rank 0] step:5901/10000 train_time:231238ms step_avg:39.19ms +[2025-09-05 15:41:20] [Rank 0] step:5921/10000 train_time:231898ms step_avg:39.17ms +[2025-09-05 15:41:20] [Rank 0] step:5921/10000 train_time:231898ms step_avg:39.17ms +[2025-09-05 15:41:21] [Rank 0] step:5941/10000 train_time:232556ms step_avg:39.14ms +[2025-09-05 15:41:21] [Rank 0] step:5941/10000 train_time:232556ms step_avg:39.14ms +[2025-09-05 15:41:22] [Rank 0] step:5961/10000 train_time:233216ms step_avg:39.12ms +[2025-09-05 15:41:22] [Rank 0] step:5961/10000 train_time:233216ms step_avg:39.12ms +[2025-09-05 15:41:22] [Rank 0] step:5981/10000 train_time:233877ms step_avg:39.10ms +[2025-09-05 15:41:22] [Rank 0] step:5981/10000 train_time:233877ms step_avg:39.10ms +[2025-09-05 15:41:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:41:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:41:23] [Rank 0] PRINT: step:6000/10000 train_loss:0.6850 val_loss:0.6744 train_time:234768ms step_avg:39.13ms +[2025-09-05 15:41:23] [Rank 0] PRINT: step:6000/10000 train_loss:0.6850 val_loss:0.6744 train_time:234768ms step_avg:39.13ms +[2025-09-05 15:41:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:41:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:41:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:41:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:42:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:42:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:42:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:42:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:42:45] [Rank 0] Total Loss: 4.9447 +[2025-09-05 15:42:45] [Rank 0] Total Loss: 4.9447 +[2025-09-05 15:42:45] [Rank 0] Total FTA (Unweighted): 0.8356 +[2025-09-05 15:42:45] [Rank 0] Total FTA (Unweighted): 0.8356 +[2025-09-05 15:42:45] [Rank 0] Total FTA (Weighted): 0.8356 +[2025-09-05 15:42:45] [Rank 0] Total FTA (Weighted): 0.8356 +[2025-09-05 15:42:45] [Rank 0] Group 0 Loss: 4.8423 +[2025-09-05 15:42:45] [Rank 0] Group 0 Loss: 4.8423 +[2025-09-05 15:42:45] [Rank 0] Group 1 Loss: 4.4775 +[2025-09-05 15:42:45] [Rank 0] Group 1 Loss: 4.4775 +[2025-09-05 15:42:45] [Rank 0] Group 2 Loss: 4.5450 +[2025-09-05 15:42:45] [Rank 0] Group 2 Loss: 4.5450 +[2025-09-05 15:42:45] [Rank 0] Group 3 Loss: 4.8802 +[2025-09-05 15:42:45] [Rank 0] Group 3 Loss: 4.8802 +[2025-09-05 15:42:45] [Rank 0] Group 4 Loss: 4.9098 +[2025-09-05 15:42:45] [Rank 0] Group 4 Loss: 4.9098 +[2025-09-05 15:42:45] [Rank 0] Group 5 Loss: 4.8633 +[2025-09-05 15:42:45] [Rank 0] Group 5 Loss: 4.8633 +[2025-09-05 15:42:45] [Rank 0] Group 6 Loss: 4.7734 +[2025-09-05 15:42:45] [Rank 0] Group 6 Loss: 4.7734 +[2025-09-05 15:42:45] [Rank 0] Group 7 Loss: 4.8279 +[2025-09-05 15:42:45] [Rank 0] Group 7 Loss: 4.8279 +[2025-09-05 15:42:45] [Rank 0] Group 8 Loss: 4.9589 +[2025-09-05 15:42:45] [Rank 0] Group 8 Loss: 4.9589 +[2025-09-05 15:42:45] [Rank 0] Group 9 Loss: 4.9698 +[2025-09-05 15:42:45] [Rank 0] Group 9 Loss: 4.9698 +[2025-09-05 15:42:45] [Rank 0] Group 10 Loss: 5.0514 +[2025-09-05 15:42:45] [Rank 0] Group 10 Loss: 5.0514 +[2025-09-05 15:42:45] [Rank 0] Group 11 Loss: 5.0316 +[2025-09-05 15:42:45] [Rank 0] Group 11 Loss: 5.0316 +[2025-09-05 15:42:45] [Rank 0] Group 12 Loss: 5.0680 +[2025-09-05 15:42:45] [Rank 0] Group 12 Loss: 5.0680 +[2025-09-05 15:42:45] [Rank 0] Group 13 Loss: 5.2694 +[2025-09-05 15:42:45] [Rank 0] Group 13 Loss: 5.2694 +[2025-09-05 15:42:45] [Rank 0] Group 14 Loss: 5.2530 +[2025-09-05 15:42:45] [Rank 0] Group 14 Loss: 5.2530 +[2025-09-05 15:42:45] [Rank 0] Group 15 Loss: 5.3935 +[2025-09-05 15:42:45] [Rank 0] Group 15 Loss: 5.3935 +[2025-09-05 15:42:45] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 15:42:45] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 15:42:45] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 15:42:45] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 15:42:45] [Rank 0] Group 11 FTA: 0.9700 +[2025-09-05 15:42:45] [Rank 0] Group 11 FTA: 0.9700 +[2025-09-05 15:42:45] [Rank 0] Group 12 FTA: 0.8300 +[2025-09-05 15:42:45] [Rank 0] Group 12 FTA: 0.8300 +[2025-09-05 15:42:45] [Rank 0] Group 13 FTA: 0.2700 +[2025-09-05 15:42:45] [Rank 0] Group 13 FTA: 0.2700 +[2025-09-05 15:42:45] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 15:42:45] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 15:42:45] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 15:42:45] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 15:42:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:42:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:42:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:42:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:42:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:42:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:42:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:42:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:42:46] [Rank 0] step:6001/10000 train_time:234778ms step_avg:39.12ms +[2025-09-05 15:42:46] [Rank 0] step:6001/10000 train_time:234778ms step_avg:39.12ms +[2025-09-05 15:42:47] [Rank 0] step:6021/10000 train_time:235294ms step_avg:39.08ms +[2025-09-05 15:42:47] [Rank 0] step:6021/10000 train_time:235294ms step_avg:39.08ms +[2025-09-05 15:42:48] [Rank 0] step:6041/10000 train_time:235925ms step_avg:39.05ms +[2025-09-05 15:42:48] [Rank 0] step:6041/10000 train_time:235925ms step_avg:39.05ms +[2025-09-05 15:42:48] [Rank 0] step:6061/10000 train_time:236586ms step_avg:39.03ms +[2025-09-05 15:42:48] [Rank 0] step:6061/10000 train_time:236586ms step_avg:39.03ms +[2025-09-05 15:42:49] [Rank 0] step:6081/10000 train_time:237443ms step_avg:39.05ms +[2025-09-05 15:42:49] [Rank 0] step:6081/10000 train_time:237443ms step_avg:39.05ms +[2025-09-05 15:42:50] [Rank 0] step:6101/10000 train_time:238102ms step_avg:39.03ms +[2025-09-05 
15:42:50] [Rank 0] step:6101/10000 train_time:238102ms step_avg:39.03ms +[2025-09-05 15:42:50] [Rank 0] step:6121/10000 train_time:238763ms step_avg:39.01ms +[2025-09-05 15:42:50] [Rank 0] step:6121/10000 train_time:238763ms step_avg:39.01ms +[2025-09-05 15:42:51] [Rank 0] step:6141/10000 train_time:239423ms step_avg:38.99ms +[2025-09-05 15:42:51] [Rank 0] step:6141/10000 train_time:239423ms step_avg:38.99ms +[2025-09-05 15:42:52] [Rank 0] step:6161/10000 train_time:240270ms step_avg:39.00ms +[2025-09-05 15:42:52] [Rank 0] step:6161/10000 train_time:240270ms step_avg:39.00ms +[2025-09-05 15:42:53] [Rank 0] step:6181/10000 train_time:240929ms step_avg:38.98ms +[2025-09-05 15:42:53] [Rank 0] step:6181/10000 train_time:240929ms step_avg:38.98ms +[2025-09-05 15:42:53] [Rank 0] step:6201/10000 train_time:241589ms step_avg:38.96ms +[2025-09-05 15:42:53] [Rank 0] step:6201/10000 train_time:241589ms step_avg:38.96ms +[2025-09-05 15:42:54] [Rank 0] step:6221/10000 train_time:242249ms step_avg:38.94ms +[2025-09-05 15:42:54] [Rank 0] step:6221/10000 train_time:242249ms step_avg:38.94ms +[2025-09-05 15:42:54] [Rank 0] step:6241/10000 train_time:242909ms step_avg:38.92ms +[2025-09-05 15:42:54] [Rank 0] step:6241/10000 train_time:242909ms step_avg:38.92ms +[2025-09-05 15:42:55] [Rank 0] step:6261/10000 train_time:243569ms step_avg:38.90ms +[2025-09-05 15:42:55] [Rank 0] step:6261/10000 train_time:243569ms step_avg:38.90ms +[2025-09-05 15:42:56] [Rank 0] step:6281/10000 train_time:244229ms step_avg:38.88ms +[2025-09-05 15:42:56] [Rank 0] step:6281/10000 train_time:244229ms step_avg:38.88ms +[2025-09-05 15:42:56] [Rank 0] step:6301/10000 train_time:244888ms step_avg:38.86ms +[2025-09-05 15:42:56] [Rank 0] step:6301/10000 train_time:244888ms step_avg:38.86ms +[2025-09-05 15:42:57] [Rank 0] step:6321/10000 train_time:245547ms step_avg:38.85ms +[2025-09-05 15:42:57] [Rank 0] step:6321/10000 train_time:245547ms step_avg:38.85ms +[2025-09-05 15:42:58] [Rank 0] step:6341/10000 train_time:246207ms step_avg:38.83ms +[2025-09-05 15:42:58] [Rank 0] step:6341/10000 train_time:246207ms step_avg:38.83ms +[2025-09-05 15:42:58] [Rank 0] step:6361/10000 train_time:246866ms step_avg:38.81ms +[2025-09-05 15:42:58] [Rank 0] step:6361/10000 train_time:246866ms step_avg:38.81ms +[2025-09-05 15:42:59] [Rank 0] step:6381/10000 train_time:247527ms step_avg:38.79ms +[2025-09-05 15:42:59] [Rank 0] step:6381/10000 train_time:247527ms step_avg:38.79ms +[2025-09-05 15:43:00] [Rank 0] step:6401/10000 train_time:248187ms step_avg:38.77ms +[2025-09-05 15:43:00] [Rank 0] step:6401/10000 train_time:248187ms step_avg:38.77ms +[2025-09-05 15:43:00] [Rank 0] step:6421/10000 train_time:248847ms step_avg:38.76ms +[2025-09-05 15:43:00] [Rank 0] step:6421/10000 train_time:248847ms step_avg:38.76ms +[2025-09-05 15:43:01] [Rank 0] step:6441/10000 train_time:249507ms step_avg:38.74ms +[2025-09-05 15:43:01] [Rank 0] step:6441/10000 train_time:249507ms step_avg:38.74ms +[2025-09-05 15:43:02] [Rank 0] step:6461/10000 train_time:250168ms step_avg:38.72ms +[2025-09-05 15:43:02] [Rank 0] step:6461/10000 train_time:250168ms step_avg:38.72ms +[2025-09-05 15:43:02] [Rank 0] step:6481/10000 train_time:250828ms step_avg:38.70ms +[2025-09-05 15:43:02] [Rank 0] step:6481/10000 train_time:250828ms step_avg:38.70ms +[2025-09-05 15:43:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:43:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:43:04] [Rank 0] PRINT: step:6500/10000 train_loss:0.6784 val_loss:0.6673 train_time:251723ms step_avg:38.73ms +[2025-09-05 15:43:04] [Rank 0] PRINT: step:6500/10000 train_loss:0.6784 val_loss:0.6673 train_time:251723ms step_avg:38.73ms +[2025-09-05 15:43:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:43:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:43:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:43:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:44:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:44:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:44:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:44:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:44:25] [Rank 0] Total Loss: 4.9573 +[2025-09-05 15:44:25] [Rank 0] Total Loss: 4.9573 +[2025-09-05 15:44:25] [Rank 0] Total FTA (Unweighted): 0.8431 +[2025-09-05 15:44:25] [Rank 0] Total FTA (Unweighted): 0.8431 +[2025-09-05 15:44:25] [Rank 0] Total FTA (Weighted): 0.8431 +[2025-09-05 15:44:25] [Rank 0] Total FTA (Weighted): 0.8431 +[2025-09-05 15:44:25] [Rank 0] Group 0 Loss: 4.9559 +[2025-09-05 15:44:25] [Rank 0] Group 0 Loss: 4.9559 +[2025-09-05 15:44:25] [Rank 0] Group 1 Loss: 4.4852 +[2025-09-05 15:44:25] [Rank 0] Group 1 Loss: 4.4852 +[2025-09-05 15:44:25] [Rank 0] Group 2 Loss: 4.5495 +[2025-09-05 15:44:25] [Rank 0] Group 2 Loss: 4.5495 +[2025-09-05 15:44:25] [Rank 0] Group 3 Loss: 4.8776 +[2025-09-05 15:44:25] [Rank 0] Group 3 Loss: 4.8776 +[2025-09-05 15:44:25] [Rank 0] Group 4 Loss: 4.9783 +[2025-09-05 15:44:25] [Rank 0] Group 4 Loss: 4.9783 +[2025-09-05 15:44:25] [Rank 0] Group 5 Loss: 4.8254 +[2025-09-05 15:44:25] [Rank 0] Group 5 Loss: 4.8254 +[2025-09-05 15:44:25] [Rank 0] Group 6 Loss: 4.7791 +[2025-09-05 15:44:25] [Rank 0] Group 6 Loss: 4.7791 +[2025-09-05 15:44:26] [Rank 0] Group 7 Loss: 4.9033 +[2025-09-05 15:44:26] [Rank 0] Group 7 Loss: 4.9033 +[2025-09-05 15:44:26] [Rank 0] Group 8 Loss: 4.9703 +[2025-09-05 15:44:26] [Rank 0] Group 8 Loss: 4.9703 +[2025-09-05 15:44:26] [Rank 0] Group 9 Loss: 4.9445 +[2025-09-05 15:44:26] [Rank 0] Group 9 Loss: 4.9445 +[2025-09-05 15:44:26] [Rank 0] Group 10 Loss: 5.0383 +[2025-09-05 15:44:26] [Rank 0] Group 10 Loss: 5.0383 +[2025-09-05 15:44:26] [Rank 0] Group 11 Loss: 5.0458 +[2025-09-05 15:44:26] [Rank 0] Group 11 Loss: 5.0458 +[2025-09-05 15:44:26] [Rank 0] Group 12 Loss: 5.1191 +[2025-09-05 15:44:26] [Rank 0] Group 12 Loss: 5.1191 +[2025-09-05 15:44:26] [Rank 0] Group 13 Loss: 5.2376 +[2025-09-05 15:44:26] [Rank 0] Group 13 Loss: 5.2376 +[2025-09-05 15:44:26] [Rank 0] Group 14 Loss: 5.2394 +[2025-09-05 15:44:26] [Rank 0] Group 14 Loss: 5.2394 +[2025-09-05 15:44:26] [Rank 0] Group 15 Loss: 5.3682 +[2025-09-05 15:44:26] [Rank 0] Group 15 Loss: 5.3682 +[2025-09-05 15:44:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 15:44:26] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 5 FTA: 0.9900 +[2025-09-05 15:44:26] [Rank 0] Group 5 FTA: 0.9900 +[2025-09-05 15:44:26] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:44:26] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 15:44:26] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 15:44:26] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 15:44:26] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 15:44:26] [Rank 0] Group 9 FTA: 0.9800 +[2025-09-05 15:44:26] [Rank 0] Group 9 FTA: 0.9800 +[2025-09-05 15:44:26] [Rank 0] Group 10 FTA: 0.9800 +[2025-09-05 15:44:26] [Rank 0] Group 10 FTA: 0.9800 +[2025-09-05 15:44:26] [Rank 0] Group 11 FTA: 0.9600 +[2025-09-05 15:44:26] [Rank 0] Group 11 FTA: 0.9600 +[2025-09-05 15:44:26] [Rank 0] Group 12 FTA: 0.8500 +[2025-09-05 15:44:26] [Rank 0] Group 12 FTA: 0.8500 +[2025-09-05 15:44:26] [Rank 0] Group 13 FTA: 0.4600 +[2025-09-05 15:44:26] [Rank 0] Group 13 FTA: 0.4600 +[2025-09-05 15:44:26] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 15:44:26] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 15:44:26] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 15:44:26] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 15:44:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:44:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:44:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:44:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:44:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:44:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:44:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:44:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:44:27] [Rank 0] step:6501/10000 train_time:251732ms step_avg:38.72ms +[2025-09-05 15:44:27] [Rank 0] step:6501/10000 train_time:251732ms step_avg:38.72ms +[2025-09-05 15:44:28] [Rank 0] step:6521/10000 train_time:252168ms step_avg:38.67ms +[2025-09-05 15:44:28] [Rank 0] step:6521/10000 train_time:252168ms step_avg:38.67ms +[2025-09-05 15:44:28] [Rank 0] step:6541/10000 train_time:252827ms step_avg:38.65ms +[2025-09-05 15:44:28] [Rank 0] step:6541/10000 train_time:252827ms step_avg:38.65ms +[2025-09-05 15:44:29] [Rank 0] step:6561/10000 train_time:253487ms step_avg:38.64ms +[2025-09-05 15:44:29] [Rank 0] step:6561/10000 train_time:253487ms step_avg:38.64ms +[2025-09-05 15:44:30] [Rank 0] step:6581/10000 train_time:254146ms step_avg:38.62ms +[2025-09-05 15:44:30] [Rank 0] step:6581/10000 train_time:254146ms step_avg:38.62ms +[2025-09-05 15:44:30] [Rank 0] step:6601/10000 train_time:254805ms step_avg:38.60ms +[2025-09-05 
15:44:30] [Rank 0] step:6601/10000 train_time:254805ms step_avg:38.60ms +[2025-09-05 15:44:31] [Rank 0] step:6621/10000 train_time:255464ms step_avg:38.58ms +[2025-09-05 15:44:31] [Rank 0] step:6621/10000 train_time:255464ms step_avg:38.58ms +[2025-09-05 15:44:32] [Rank 0] step:6641/10000 train_time:256122ms step_avg:38.57ms +[2025-09-05 15:44:32] [Rank 0] step:6641/10000 train_time:256122ms step_avg:38.57ms +[2025-09-05 15:44:32] [Rank 0] step:6661/10000 train_time:256782ms step_avg:38.55ms +[2025-09-05 15:44:32] [Rank 0] step:6661/10000 train_time:256782ms step_avg:38.55ms +[2025-09-05 15:44:33] [Rank 0] step:6681/10000 train_time:257440ms step_avg:38.53ms +[2025-09-05 15:44:33] [Rank 0] step:6681/10000 train_time:257440ms step_avg:38.53ms +[2025-09-05 15:44:34] [Rank 0] step:6701/10000 train_time:258099ms step_avg:38.52ms +[2025-09-05 15:44:34] [Rank 0] step:6701/10000 train_time:258099ms step_avg:38.52ms +[2025-09-05 15:44:34] [Rank 0] step:6721/10000 train_time:258758ms step_avg:38.50ms +[2025-09-05 15:44:34] [Rank 0] step:6721/10000 train_time:258758ms step_avg:38.50ms +[2025-09-05 15:44:35] [Rank 0] step:6741/10000 train_time:259417ms step_avg:38.48ms +[2025-09-05 15:44:35] [Rank 0] step:6741/10000 train_time:259417ms step_avg:38.48ms +[2025-09-05 15:44:36] [Rank 0] step:6761/10000 train_time:260081ms step_avg:38.47ms +[2025-09-05 15:44:36] [Rank 0] step:6761/10000 train_time:260081ms step_avg:38.47ms +[2025-09-05 15:44:36] [Rank 0] step:6781/10000 train_time:260741ms step_avg:38.45ms +[2025-09-05 15:44:36] [Rank 0] step:6781/10000 train_time:260741ms step_avg:38.45ms +[2025-09-05 15:44:37] [Rank 0] step:6801/10000 train_time:261399ms step_avg:38.44ms +[2025-09-05 15:44:37] [Rank 0] step:6801/10000 train_time:261399ms step_avg:38.44ms +[2025-09-05 15:44:38] [Rank 0] step:6821/10000 train_time:262059ms step_avg:38.42ms +[2025-09-05 15:44:38] [Rank 0] step:6821/10000 train_time:262059ms step_avg:38.42ms +[2025-09-05 15:44:38] [Rank 0] step:6841/10000 train_time:262822ms step_avg:38.42ms +[2025-09-05 15:44:38] [Rank 0] step:6841/10000 train_time:262822ms step_avg:38.42ms +[2025-09-05 15:44:39] [Rank 0] step:6861/10000 train_time:263481ms step_avg:38.40ms +[2025-09-05 15:44:39] [Rank 0] step:6861/10000 train_time:263481ms step_avg:38.40ms +[2025-09-05 15:44:40] [Rank 0] step:6881/10000 train_time:264141ms step_avg:38.39ms +[2025-09-05 15:44:40] [Rank 0] step:6881/10000 train_time:264141ms step_avg:38.39ms +[2025-09-05 15:44:40] [Rank 0] step:6901/10000 train_time:264800ms step_avg:38.37ms +[2025-09-05 15:44:40] [Rank 0] step:6901/10000 train_time:264800ms step_avg:38.37ms +[2025-09-05 15:44:41] [Rank 0] step:6921/10000 train_time:265459ms step_avg:38.36ms +[2025-09-05 15:44:41] [Rank 0] step:6921/10000 train_time:265459ms step_avg:38.36ms +[2025-09-05 15:44:42] [Rank 0] step:6941/10000 train_time:266117ms step_avg:38.34ms +[2025-09-05 15:44:42] [Rank 0] step:6941/10000 train_time:266117ms step_avg:38.34ms +[2025-09-05 15:44:42] [Rank 0] step:6961/10000 train_time:266775ms step_avg:38.32ms +[2025-09-05 15:44:42] [Rank 0] step:6961/10000 train_time:266775ms step_avg:38.32ms +[2025-09-05 15:44:43] [Rank 0] step:6981/10000 train_time:267434ms step_avg:38.31ms +[2025-09-05 15:44:43] [Rank 0] step:6981/10000 train_time:267434ms step_avg:38.31ms +[2025-09-05 15:44:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:44:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:44:44] [Rank 0] PRINT: step:7000/10000 train_loss:0.6719 val_loss:0.6615 train_time:268327ms step_avg:38.33ms +[2025-09-05 15:44:44] [Rank 0] PRINT: step:7000/10000 train_loss:0.6719 val_loss:0.6615 train_time:268327ms step_avg:38.33ms +[2025-09-05 15:44:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:44:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:44:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:44:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:46:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:46:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:46:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:46:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:46:05] [Rank 0] Total Loss: 5.0152 +[2025-09-05 15:46:05] [Rank 0] Total Loss: 5.0152 +[2025-09-05 15:46:05] [Rank 0] Total FTA (Unweighted): 0.8606 +[2025-09-05 15:46:05] [Rank 0] Total FTA (Unweighted): 0.8606 +[2025-09-05 15:46:05] [Rank 0] Total FTA (Weighted): 0.8606 +[2025-09-05 15:46:05] [Rank 0] Total FTA (Weighted): 0.8606 +[2025-09-05 15:46:05] [Rank 0] Group 0 Loss: 4.9316 +[2025-09-05 15:46:05] [Rank 0] Group 0 Loss: 4.9316 +[2025-09-05 15:46:05] [Rank 0] Group 1 Loss: 4.5753 +[2025-09-05 15:46:05] [Rank 0] Group 1 Loss: 4.5753 +[2025-09-05 15:46:05] [Rank 0] Group 2 Loss: 4.5963 +[2025-09-05 15:46:05] [Rank 0] Group 2 Loss: 4.5963 +[2025-09-05 15:46:05] [Rank 0] Group 3 Loss: 4.9174 +[2025-09-05 15:46:05] [Rank 0] Group 3 Loss: 4.9174 +[2025-09-05 15:46:05] [Rank 0] Group 4 Loss: 5.0236 +[2025-09-05 15:46:05] [Rank 0] Group 4 Loss: 5.0236 +[2025-09-05 15:46:05] [Rank 0] Group 5 Loss: 4.9728 +[2025-09-05 15:46:05] [Rank 0] Group 5 Loss: 4.9728 +[2025-09-05 15:46:05] [Rank 0] Group 6 Loss: 4.8467 +[2025-09-05 15:46:05] [Rank 0] Group 6 Loss: 4.8467 +[2025-09-05 15:46:05] [Rank 0] Group 7 Loss: 4.9276 +[2025-09-05 15:46:05] [Rank 0] Group 7 Loss: 4.9276 +[2025-09-05 15:46:05] [Rank 0] Group 8 Loss: 5.0427 +[2025-09-05 15:46:05] [Rank 0] Group 8 Loss: 5.0427 +[2025-09-05 15:46:06] [Rank 0] Group 9 Loss: 5.0266 +[2025-09-05 15:46:06] [Rank 0] Group 9 Loss: 5.0266 +[2025-09-05 15:46:06] [Rank 0] Group 10 Loss: 5.1205 +[2025-09-05 15:46:06] [Rank 0] Group 10 Loss: 5.1205 +[2025-09-05 15:46:06] [Rank 0] Group 11 Loss: 5.1209 +[2025-09-05 15:46:06] [Rank 0] Group 11 Loss: 5.1209 +[2025-09-05 15:46:06] [Rank 0] Group 12 Loss: 5.1943 +[2025-09-05 15:46:06] [Rank 0] Group 12 Loss: 5.1943 +[2025-09-05 15:46:06] [Rank 0] Group 13 Loss: 5.2669 +[2025-09-05 15:46:06] [Rank 0] Group 13 Loss: 5.2669 +[2025-09-05 15:46:06] [Rank 0] Group 14 Loss: 5.2579 +[2025-09-05 15:46:06] [Rank 0] Group 14 Loss: 5.2579 +[2025-09-05 15:46:06] [Rank 0] Group 15 Loss: 5.4226 +[2025-09-05 15:46:06] [Rank 0] Group 15 Loss: 5.4226 +[2025-09-05 15:46:06] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 15:46:06] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 10 FTA: 0.9800 +[2025-09-05 15:46:06] [Rank 0] Group 10 FTA: 0.9800 +[2025-09-05 15:46:06] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 15:46:06] [Rank 0] Group 12 FTA: 0.9000 +[2025-09-05 15:46:06] [Rank 0] Group 12 FTA: 0.9000 +[2025-09-05 15:46:06] [Rank 0] Group 13 FTA: 0.4900 +[2025-09-05 15:46:06] [Rank 0] Group 13 FTA: 0.4900 +[2025-09-05 15:46:06] [Rank 0] Group 14 FTA: 0.2500 +[2025-09-05 15:46:06] [Rank 0] Group 14 FTA: 0.2500 +[2025-09-05 15:46:06] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-05 15:46:06] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-05 15:46:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:46:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:46:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:46:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:46:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:46:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:46:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:46:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:46:07] [Rank 0] step:7001/10000 train_time:268336ms step_avg:38.33ms +[2025-09-05 15:46:07] [Rank 0] step:7001/10000 train_time:268336ms step_avg:38.33ms +[2025-09-05 15:46:08] [Rank 0] step:7021/10000 train_time:268768ms step_avg:38.28ms +[2025-09-05 15:46:08] [Rank 0] step:7021/10000 train_time:268768ms step_avg:38.28ms +[2025-09-05 15:46:08] [Rank 0] step:7041/10000 train_time:269427ms step_avg:38.27ms +[2025-09-05 15:46:08] [Rank 0] step:7041/10000 train_time:269427ms step_avg:38.27ms +[2025-09-05 15:46:09] [Rank 0] step:7061/10000 train_time:270087ms step_avg:38.25ms +[2025-09-05 15:46:09] [Rank 0] step:7061/10000 train_time:270087ms step_avg:38.25ms +[2025-09-05 15:46:10] [Rank 0] step:7081/10000 train_time:270747ms step_avg:38.24ms +[2025-09-05 15:46:10] [Rank 0] step:7081/10000 train_time:270747ms step_avg:38.24ms +[2025-09-05 15:46:10] [Rank 0] step:7101/10000 train_time:271408ms step_avg:38.22ms +[2025-09-05 
+[2025-09-05 15:46:11] [Rank 0] step:7121/10000 train_time:272066ms step_avg:38.21ms
+[2025-09-05 15:46:12] [Rank 0] step:7141/10000 train_time:272726ms step_avg:38.19ms
+[2025-09-05 15:46:12] [Rank 0] step:7161/10000 train_time:273386ms step_avg:38.18ms
+[2025-09-05 15:46:13] [Rank 0] step:7181/10000 train_time:274046ms step_avg:38.16ms
+[2025-09-05 15:46:14] [Rank 0] step:7201/10000 train_time:274705ms step_avg:38.15ms
+[2025-09-05 15:46:14] [Rank 0] step:7221/10000 train_time:275366ms step_avg:38.13ms
+[2025-09-05 15:46:15] [Rank 0] step:7241/10000 train_time:276026ms step_avg:38.12ms
+[2025-09-05 15:46:16] [Rank 0] step:7261/10000 train_time:276685ms step_avg:38.11ms
+[2025-09-05 15:46:16] [Rank 0] step:7281/10000 train_time:277344ms step_avg:38.09ms
+[2025-09-05 15:46:17] [Rank 0] step:7301/10000 train_time:278003ms step_avg:38.08ms
+[2025-09-05 15:46:18] [Rank 0] step:7321/10000 train_time:278663ms step_avg:38.06ms
+[2025-09-05 15:46:18] [Rank 0] step:7341/10000 train_time:279323ms step_avg:38.05ms
+[2025-09-05 15:46:19] [Rank 0] step:7361/10000 train_time:280085ms step_avg:38.05ms
+[2025-09-05 15:46:20] [Rank 0] step:7381/10000 train_time:280745ms step_avg:38.04ms
+[2025-09-05 15:46:20] [Rank 0] step:7401/10000 train_time:281405ms step_avg:38.02ms
+[2025-09-05 15:46:21] [Rank 0] step:7421/10000 train_time:282064ms step_avg:38.01ms
+[2025-09-05 15:46:22] [Rank 0] step:7441/10000 train_time:282724ms step_avg:38.00ms
+[2025-09-05 15:46:22] [Rank 0] step:7461/10000 train_time:283384ms step_avg:37.98ms
+[2025-09-05 15:46:23] [Rank 0] step:7481/10000 train_time:284043ms step_avg:37.97ms
+[2025-09-05 15:46:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:46:24] [Rank 0] PRINT: step:7500/10000 train_loss:0.6658 val_loss:0.6556 train_time:284936ms step_avg:37.99ms
+[2025-09-05 15:46:24] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:46:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:47:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:47:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:47:45] [Rank 0] Total Loss: 4.9592
+[2025-09-05 15:47:45] [Rank 0] Total FTA (Unweighted): 0.8656
+[2025-09-05 15:47:45] [Rank 0] Total FTA (Weighted): 0.8656
+[2025-09-05 15:47:45] [Rank 0] Group 0 Loss: 4.8641
+[2025-09-05 15:47:45] [Rank 0] Group 1 Loss: 4.5460
+[2025-09-05 15:47:45] [Rank 0] Group 2 Loss: 4.5536
+[2025-09-05 15:47:45] [Rank 0] Group 3 Loss: 4.8735
+[2025-09-05 15:47:45] [Rank 0] Group 4 Loss: 4.9621
+[2025-09-05 15:47:45] [Rank 0] Group 5 Loss: 4.8867
+[2025-09-05 15:47:45] [Rank 0] Group 6 Loss: 4.8090
+[2025-09-05 15:47:45] [Rank 0] Group 7 Loss: 4.8963
+[2025-09-05 15:47:45] [Rank 0] Group 8 Loss: 5.0014
+[2025-09-05 15:47:45] [Rank 0] Group 9 Loss: 4.9457
+[2025-09-05 15:47:45] [Rank 0] Group 10 Loss: 5.0802
+[2025-09-05 15:47:45] [Rank 0] Group 11 Loss: 5.0508
+[2025-09-05 15:47:45] [Rank 0] Group 12 Loss: 5.0989
+[2025-09-05 15:47:45] [Rank 0] Group 13 Loss: 5.1999
+[2025-09-05 15:47:45] [Rank 0] Group 14 Loss: 5.2142
+[2025-09-05 15:47:45] [Rank 0] Group 15 Loss: 5.3641
+[2025-09-05 15:47:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:47:45] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:47:45] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:47:45] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:47:45] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:47:45] [Rank 0] Group 5 FTA: 0.9700
+[2025-09-05 15:47:45] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:47:45] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 15:47:45] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:47:45] [Rank 0] Group 9 FTA: 0.9900
+[2025-09-05 15:47:46] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 15:47:46] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-05 15:47:46] [Rank 0] Group 12 FTA: 0.9400
+[2025-09-05 15:47:46] [Rank 0] Group 13 FTA: 0.6000
+[2025-09-05 15:47:46] [Rank 0] Group 14 FTA: 0.2500
+[2025-09-05 15:47:46] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 15:47:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:47:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:47:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:47:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:47:47] [Rank 0] step:7501/10000 train_time:284945ms step_avg:37.99ms
+[2025-09-05 15:47:48] [Rank 0] step:7521/10000 train_time:285393ms step_avg:37.95ms
+[2025-09-05 15:47:48] [Rank 0] step:7541/10000 train_time:286052ms step_avg:37.93ms
+[2025-09-05 15:47:49] [Rank 0] step:7561/10000 train_time:286712ms step_avg:37.92ms
+[2025-09-05 15:47:50] [Rank 0] step:7581/10000 train_time:287371ms step_avg:37.91ms
+[2025-09-05 15:47:50] [Rank 0] step:7601/10000 train_time:288029ms step_avg:37.89ms
+[2025-09-05 15:47:51] [Rank 0] step:7621/10000 train_time:288689ms step_avg:37.88ms
+[2025-09-05 15:47:52] [Rank 0] step:7641/10000 train_time:289998ms step_avg:37.95ms
+[2025-09-05 15:47:53] [Rank 0] step:7661/10000 train_time:290469ms step_avg:37.92ms
+[2025-09-05 15:47:53] [Rank 0] step:7681/10000 train_time:291127ms step_avg:37.90ms
+[2025-09-05 15:47:54] [Rank 0] step:7701/10000 train_time:291786ms step_avg:37.89ms
+[2025-09-05 15:47:55] [Rank 0] step:7721/10000 train_time:292445ms step_avg:37.88ms
+[2025-09-05 15:47:55] [Rank 0] step:7741/10000 train_time:293104ms step_avg:37.86ms
+[2025-09-05 15:47:56] [Rank 0] step:7761/10000 train_time:293762ms step_avg:37.85ms
+[2025-09-05 15:47:57] [Rank 0] step:7781/10000 train_time:294422ms step_avg:37.84ms
+[2025-09-05 15:47:57] [Rank 0] step:7801/10000 train_time:295081ms step_avg:37.83ms
+[2025-09-05 15:47:58] [Rank 0] step:7821/10000 train_time:295740ms step_avg:37.81ms
+[2025-09-05 15:47:59] [Rank 0] step:7841/10000 train_time:296398ms step_avg:37.80ms
+[2025-09-05 15:47:59] [Rank 0] step:7861/10000 train_time:297059ms step_avg:37.79ms
+[2025-09-05 15:48:00] [Rank 0] step:7881/10000 train_time:297718ms step_avg:37.78ms
+[2025-09-05 15:48:01] [Rank 0] step:7901/10000 train_time:298377ms step_avg:37.76ms
+[2025-09-05 15:48:01] [Rank 0] step:7921/10000 train_time:299036ms step_avg:37.75ms
+[2025-09-05 15:48:02] [Rank 0] step:7941/10000 train_time:299696ms step_avg:37.74ms
+[2025-09-05 15:48:03] [Rank 0] step:7961/10000 train_time:300354ms step_avg:37.73ms
+[2025-09-05 15:48:03] [Rank 0] step:7981/10000 train_time:301013ms step_avg:37.72ms
+[2025-09-05 15:48:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:48:04] [Rank 0] PRINT: step:8000/10000 train_loss:0.6602 val_loss:0.6502 train_time:301906ms step_avg:37.74ms
+[2025-09-05 15:48:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:48:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:49:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:49:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:49:26] [Rank 0] Total Loss: 4.9902
+[2025-09-05 15:49:26] [Rank 0] Total FTA (Unweighted): 0.8769
+[2025-09-05 15:49:26] [Rank 0] Total FTA (Weighted): 0.8769
+[2025-09-05 15:49:26] [Rank 0] Group 0 Loss: 4.9272
+[2025-09-05 15:49:26] [Rank 0] Group 1 Loss: 4.5430
+[2025-09-05 15:49:27] [Rank 0] Group 2 Loss: 4.5461
+[2025-09-05 15:49:27] [Rank 0] Group 3 Loss: 4.8858
+[2025-09-05 15:49:27] [Rank 0] Group 4 Loss: 4.9885
+[2025-09-05 15:49:27] [Rank 0] Group 5 Loss: 4.9719
+[2025-09-05 15:49:27] [Rank 0] Group 6 Loss: 4.8166
+[2025-09-05 15:49:27] [Rank 0] Group 7 Loss: 4.9186
+[2025-09-05 15:49:27] [Rank 0] Group 8 Loss: 5.0461
+[2025-09-05 15:49:27] [Rank 0] Group 9 Loss: 4.9914
+[2025-09-05 15:49:27] [Rank 0] Group 10 Loss: 5.0984
+[2025-09-05 15:49:27] [Rank 0] Group 11 Loss: 5.0888
+[2025-09-05 15:49:27] [Rank 0] Group 12 Loss: 5.1469
+[2025-09-05 15:49:27] [Rank 0] Group 13 Loss: 5.2424
+[2025-09-05 15:49:27] [Rank 0] Group 14 Loss: 5.2569
+[2025-09-05 15:49:27] [Rank 0] Group 15 Loss: 5.3752
+[2025-09-05 15:49:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:49:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:49:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:49:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:49:27] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:49:27] [Rank 0] Group 5 FTA: 0.9600
+[2025-09-05 15:49:27] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:49:27] [Rank 0] Group 7 FTA: 0.9800
+[2025-09-05 15:49:27] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:49:27] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:49:27] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 15:49:27] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 15:49:27] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-05 15:49:27] [Rank 0] Group 13 FTA: 0.6300
+[2025-09-05 15:49:27] [Rank 0] Group 14 FTA: 0.3500
+[2025-09-05 15:49:27] [Rank 0] Group 15 FTA: 0.1700
+[2025-09-05 15:49:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:49:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:49:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:49:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:49:28] [Rank 0] step:8001/10000 train_time:301916ms step_avg:37.73ms
+[2025-09-05 15:49:29] [Rank 0] step:8021/10000 train_time:302538ms step_avg:37.72ms
+[2025-09-05 15:49:30] [Rank 0] step:8041/10000 train_time:303472ms step_avg:37.74ms
+[2025-09-05 15:49:31] [Rank 0] step:8061/10000 train_time:304132ms step_avg:37.73ms
+[2025-09-05 15:49:31] [Rank 0] step:8081/10000 train_time:304790ms step_avg:37.72ms
+[2025-09-05 15:49:32] [Rank 0] step:8101/10000 train_time:305450ms step_avg:37.71ms
+[2025-09-05 15:49:33] [Rank 0] step:8121/10000 train_time:306109ms step_avg:37.69ms
+[2025-09-05 15:49:33] [Rank 0] step:8141/10000 train_time:306772ms step_avg:37.68ms
+[2025-09-05 15:49:34] [Rank 0] step:8161/10000 train_time:307429ms step_avg:37.67ms
+[2025-09-05 15:49:35] [Rank 0] step:8181/10000 train_time:308089ms step_avg:37.66ms
+[2025-09-05 15:49:35] [Rank 0] step:8201/10000 train_time:308747ms step_avg:37.65ms
+[2025-09-05 15:49:36] [Rank 0] step:8221/10000 train_time:309407ms step_avg:37.64ms
+[2025-09-05 15:49:37] [Rank 0] step:8241/10000 train_time:310066ms step_avg:37.62ms
+[2025-09-05 15:49:37] [Rank 0] step:8261/10000 train_time:310726ms step_avg:37.61ms
+[2025-09-05 15:49:38] [Rank 0] step:8281/10000 train_time:311388ms step_avg:37.60ms
+[2025-09-05 15:49:39] [Rank 0] step:8301/10000 train_time:312044ms step_avg:37.59ms
+[2025-09-05 15:49:39] [Rank 0] step:8321/10000 train_time:312703ms step_avg:37.58ms
+[2025-09-05 15:49:40] [Rank 0] step:8341/10000 train_time:313362ms step_avg:37.57ms
+[2025-09-05 15:49:40] [Rank 0] step:8361/10000 train_time:314021ms step_avg:37.56ms
+[2025-09-05 15:49:41] [Rank 0] step:8381/10000 train_time:314680ms step_avg:37.55ms
+[2025-09-05 15:49:42] [Rank 0] step:8401/10000 train_time:315340ms step_avg:37.54ms
+[2025-09-05 15:49:42] [Rank 0] step:8421/10000 train_time:316000ms step_avg:37.53ms
+[2025-09-05 15:49:43] [Rank 0] step:8441/10000 train_time:316659ms step_avg:37.51ms
+[2025-09-05 15:49:44] [Rank 0] step:8461/10000 train_time:317318ms step_avg:37.50ms
+[2025-09-05 15:49:44] [Rank 0] step:8481/10000 train_time:317977ms step_avg:37.49ms
+[2025-09-05 15:49:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:49:46] [Rank 0] PRINT: step:8500/10000 train_loss:0.6548 val_loss:0.6453 train_time:318871ms step_avg:37.51ms
+[2025-09-05 15:49:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:49:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:51:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:51:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:51:07] [Rank 0] Total Loss: 4.9833
+[2025-09-05 15:51:07] [Rank 0] Total FTA (Unweighted): 0.8931
+[2025-09-05 15:51:07] [Rank 0] Total FTA (Weighted): 0.8931
+[2025-09-05 15:51:07] [Rank 0] Group 0 Loss: 4.9249
+[2025-09-05 15:51:07] [Rank 0] Group 1 Loss: 4.6238
+[2025-09-05 15:51:07] [Rank 0] Group 2 Loss: 4.5409
+[2025-09-05 15:51:07] [Rank 0] Group 3 Loss: 4.9333
+[2025-09-05 15:51:07] [Rank 0] Group 4 Loss: 5.0065
+[2025-09-05 15:51:07] [Rank 0] Group 5 Loss: 4.9190
+[2025-09-05 15:51:07] [Rank 0] Group 6 Loss: 4.8138
+[2025-09-05 15:51:07] [Rank 0] Group 7 Loss: 4.9316
+[2025-09-05 15:51:07] [Rank 0] Group 8 Loss: 5.0226
+[2025-09-05 15:51:07] [Rank 0] Group 9 Loss: 4.9985
+[2025-09-05 15:51:07] [Rank 0] Group 10 Loss: 5.1003
+[2025-09-05 15:51:07] [Rank 0] Group 11 Loss: 5.0879
+[2025-09-05 15:51:07] [Rank 0] Group 12 Loss: 5.1350
+[2025-09-05 15:51:07] [Rank 0] Group 13 Loss: 5.1984
+[2025-09-05 15:51:07] [Rank 0] Group 14 Loss: 5.1874
+[2025-09-05 15:51:07] [Rank 0] Group 15 Loss: 5.3088
+[2025-09-05 15:51:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 15:51:07] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 15:51:07] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-05 15:51:07] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 15:51:07] [Rank 0] Group 13 FTA: 0.7700
+[2025-09-05 15:51:07] [Rank 0] Group 14 FTA: 0.4000
+[2025-09-05 15:51:07] [Rank 0] Group 15 FTA: 0.1700
+[2025-09-05 15:51:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:51:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:51:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:51:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:51:09] [Rank 0] step:8501/10000 train_time:318880ms step_avg:37.51ms
+[2025-09-05 15:51:09] [Rank 0] step:8521/10000 train_time:319324ms step_avg:37.47ms
+[2025-09-05 15:51:10] [Rank 0] step:8541/10000 train_time:319982ms step_avg:37.46ms
+[2025-09-05 15:51:11] [Rank 0] step:8561/10000 train_time:320640ms step_avg:37.45ms
+[2025-09-05 15:51:11] [Rank 0] step:8581/10000 train_time:321300ms step_avg:37.44ms
+[2025-09-05 15:51:12] [Rank 0] step:8601/10000 train_time:321957ms step_avg:37.43ms
+[2025-09-05 15:51:13] [Rank 0] step:8621/10000 train_time:322614ms step_avg:37.42ms
+[2025-09-05 15:51:13] [Rank 0] step:8641/10000 train_time:323272ms step_avg:37.41ms
+[2025-09-05 15:51:14] [Rank 0] step:8661/10000 train_time:323931ms step_avg:37.40ms
+[2025-09-05 15:51:15] [Rank 0] step:8681/10000 train_time:324787ms step_avg:37.41ms
+[2025-09-05 15:51:15] [Rank 0] step:8701/10000 train_time:325445ms step_avg:37.40ms
+[2025-09-05 15:51:16] [Rank 0] step:8721/10000 train_time:326105ms step_avg:37.39ms
+[2025-09-05 15:51:17] [Rank 0] step:8741/10000 train_time:326763ms step_avg:37.38ms
+[2025-09-05 15:51:18] [Rank 0] step:8761/10000 train_time:327602ms step_avg:37.39ms
+[2025-09-05 15:51:18] [Rank 0] step:8781/10000 train_time:328261ms step_avg:37.38ms
+[2025-09-05 15:51:19] [Rank 0] step:8801/10000 train_time:328919ms step_avg:37.37ms
+[2025-09-05 15:51:20] [Rank 0] step:8821/10000 train_time:329577ms step_avg:37.36ms
+[2025-09-05 15:51:20] [Rank 0] step:8841/10000 train_time:330236ms step_avg:37.35ms
+[2025-09-05 15:51:21] [Rank 0] step:8861/10000 train_time:330896ms step_avg:37.34ms
+[2025-09-05 15:51:22] [Rank 0] step:8881/10000 train_time:331555ms step_avg:37.33ms
+[2025-09-05 15:51:22] [Rank 0] step:8901/10000 train_time:332215ms step_avg:37.32ms
+[2025-09-05 15:51:23] [Rank 0] step:8921/10000 train_time:332875ms step_avg:37.31ms
+[2025-09-05 15:51:23] [Rank 0] step:8941/10000 train_time:333532ms step_avg:37.30ms
+[2025-09-05 15:51:24] [Rank 0] step:8961/10000 train_time:334191ms step_avg:37.29ms
+[2025-09-05 15:51:25] [Rank 0] step:8981/10000 train_time:334850ms step_avg:37.28ms
+[2025-09-05 15:51:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:51:26] [Rank 0] PRINT: step:9000/10000 train_loss:0.6495 val_loss:0.6405 train_time:335742ms step_avg:37.30ms
+[2025-09-05 15:51:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:51:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:52:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:52:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:52:48] [Rank 0] Total Loss: 4.9895
+[2025-09-05 15:52:48] [Rank 0] Total FTA (Unweighted): 0.8944
+[2025-09-05 15:52:48] [Rank 0] Total FTA (Weighted): 0.8944
+[2025-09-05 15:52:48] [Rank 0] Group 0 Loss: 4.9561
+[2025-09-05 15:52:48] [Rank 0] Group 1 Loss: 4.4895
+[2025-09-05 15:52:48] [Rank 0] Group 2 Loss: 4.5549
+[2025-09-05 15:52:48] [Rank 0] Group 3 Loss: 4.8863
+[2025-09-05 15:52:48] [Rank 0] Group 4 Loss: 4.9990
+[2025-09-05 15:52:48] [Rank 0] Group 5 Loss: 4.9520
+[2025-09-05 15:52:48] [Rank 0] Group 6 Loss: 4.8258
+[2025-09-05 15:52:48] [Rank 0] Group 7 Loss: 4.9320
+[2025-09-05 15:52:48] [Rank 0] Group 8 Loss: 5.0563
+[2025-09-05 15:52:48] [Rank 0] Group 9 Loss: 5.0151
+[2025-09-05 15:52:48] [Rank 0] Group 10 Loss: 5.1395
+[2025-09-05 15:52:48] [Rank 0] Group 11 Loss: 5.0903
+[2025-09-05 15:52:48] [Rank 0] Group 12 Loss: 5.1846
+[2025-09-05 15:52:48] [Rank 0] Group 13 Loss: 5.2100
+[2025-09-05 15:52:48] [Rank 0] Group 14 Loss: 5.1973
+[2025-09-05 15:52:48] [Rank 0] Group 15 Loss: 5.3440
+[2025-09-05 15:52:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:52:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:52:48] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:52:48] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:52:48] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:52:48] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:52:48] [Rank 0] Group 6 FTA: 0.9900
+[2025-09-05 15:52:48] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:52:48] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:52:48] [Rank 0] Group 9 FTA: 0.9900
+[2025-09-05 15:52:48] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 15:52:48] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-05 15:52:48] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-05 15:52:48] [Rank 0] Group 13 FTA: 0.7700
+[2025-09-05 15:52:48] [Rank 0] Group 14 FTA: 0.4200
+[2025-09-05 15:52:48] [Rank 0] Group 15 FTA: 0.1700
+[2025-09-05 15:52:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:52:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:52:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:52:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:52:50] [Rank 0] step:9001/10000 train_time:335751ms step_avg:37.30ms
+[2025-09-05 15:52:51] [Rank 0] step:9021/10000 train_time:336202ms step_avg:37.27ms
+[2025-09-05 15:52:52] [Rank 0] step:9041/10000 train_time:336862ms step_avg:37.26ms
+[2025-09-05 15:52:52] [Rank 0] step:9061/10000 train_time:337521ms step_avg:37.25ms
+[2025-09-05 15:52:53] [Rank 0] step:9081/10000 train_time:338182ms step_avg:37.24ms
+[2025-09-05 15:52:54] [Rank 0] step:9101/10000 train_time:338840ms step_avg:37.23ms
+[2025-09-05 15:52:54] [Rank 0] step:9121/10000 train_time:339500ms step_avg:37.22ms
+[2025-09-05 15:52:55] [Rank 0] step:9141/10000 train_time:340160ms step_avg:37.21ms
+[2025-09-05 15:52:56] [Rank 0] step:9161/10000 train_time:340819ms step_avg:37.20ms
+[2025-09-05 15:52:56] [Rank 0] step:9181/10000 train_time:341478ms step_avg:37.19ms
+[2025-09-05 15:52:57] [Rank 0] step:9201/10000 train_time:342138ms step_avg:37.18ms
+[2025-09-05 15:52:58] [Rank 0] step:9221/10000 train_time:342798ms step_avg:37.18ms
+[2025-09-05 15:52:58] [Rank 0] step:9241/10000 train_time:343458ms step_avg:37.17ms
+[2025-09-05 15:52:59] [Rank 0] step:9261/10000 train_time:344118ms step_avg:37.16ms
+[2025-09-05 15:53:00] [Rank 0] step:9281/10000 train_time:344779ms step_avg:37.15ms
+[2025-09-05 15:53:00] [Rank 0] step:9301/10000 train_time:345439ms step_avg:37.14ms
+[2025-09-05 15:53:01] [Rank 0] step:9321/10000 train_time:346097ms step_avg:37.13ms
+[2025-09-05 15:53:01] [Rank 0] step:9341/10000 train_time:346757ms step_avg:37.12ms
+[2025-09-05 15:53:02] [Rank 0] step:9361/10000 train_time:347418ms step_avg:37.11ms
+[2025-09-05 15:53:03] [Rank 0] step:9381/10000 train_time:348077ms step_avg:37.10ms
+[2025-09-05 15:53:03] [Rank 0] step:9401/10000 train_time:348738ms step_avg:37.10ms
+[2025-09-05 15:53:04] [Rank 0] step:9421/10000 train_time:349395ms step_avg:37.09ms
+[2025-09-05 15:53:05] [Rank 0] step:9441/10000 train_time:350055ms step_avg:37.08ms
+[2025-09-05 15:53:05] [Rank 0] step:9461/10000 train_time:350713ms step_avg:37.07ms
+[2025-09-05 15:53:06] [Rank 0] step:9481/10000 train_time:351372ms step_avg:37.06ms
+[2025-09-05 15:53:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:53:07] [Rank 0] PRINT: step:9500/10000 train_loss:0.6444 val_loss:0.6360 train_time:352266ms step_avg:37.08ms
+[2025-09-05 15:53:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:53:07] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:54:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:54:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:54:29] [Rank 0] Total Loss: 4.9583
+[2025-09-05 15:54:29] [Rank 0] Total FTA (Unweighted): 0.9062
+[2025-09-05 15:54:29] [Rank 0] Total FTA (Weighted): 0.9062
+[2025-09-05 15:54:29] [Rank 0] Group 0 Loss: 4.9200
+[2025-09-05 15:54:29] [Rank 0] Group 1 Loss: 4.5294
+[2025-09-05 15:54:29] [Rank 0] Group 2 Loss: 4.5051
+[2025-09-05 15:54:29] [Rank 0] Group 3 Loss: 4.8855
+[2025-09-05 15:54:29] [Rank 0] Group 4 Loss: 4.9828
+[2025-09-05 15:54:29] [Rank 0] Group 5 Loss: 4.9177
+[2025-09-05 15:54:29] [Rank 0] Group 6 Loss: 4.8160
+[2025-09-05 15:54:29] [Rank 0] Group 7 Loss: 4.8935
+[2025-09-05 15:54:29] [Rank 0] Group 8 Loss: 5.0282
+[2025-09-05 15:54:29] [Rank 0] Group 9 Loss: 4.9701
+[2025-09-05 15:54:29] [Rank 0] Group 10 Loss: 5.1169
+[2025-09-05 15:54:29] [Rank 0] Group 11 Loss: 5.0663
+[2025-09-05 15:54:29] [Rank 0] Group 12 Loss: 5.1295
+[2025-09-05 15:54:29] [Rank 0] Group 13 Loss: 5.1314
+[2025-09-05 15:54:29] [Rank 0] Group 14 Loss: 5.1593
+[2025-09-05 15:54:29] [Rank 0] Group 15 Loss: 5.2809
+[2025-09-05 15:54:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:54:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:54:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:54:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:54:29] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:54:29] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:54:29] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:54:30] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 15:54:30] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:54:30] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:54:30] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 15:54:30] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-05 15:54:30] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-05 15:54:30] [Rank 0] Group 13 FTA: 0.8500
+[2025-09-05 15:54:30] [Rank 0] Group 14 FTA: 0.4900
+[2025-09-05 15:54:30] [Rank 0] Group 15 FTA: 0.2000
+[2025-09-05 15:54:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-09-05 15:54:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-09-05 15:54:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png
+[2025-09-05 15:54:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png
+[2025-09-05 15:54:31] [Rank 0] step:9501/10000 train_time:352275ms step_avg:37.08ms
+[2025-09-05 15:54:32] [Rank 0] step:9521/10000 train_time:352720ms step_avg:37.05ms
+[2025-09-05 15:54:32] [Rank 0] step:9541/10000 train_time:353378ms step_avg:37.04ms
+[2025-09-05 15:54:33] [Rank 0] step:9561/10000 train_time:354037ms step_avg:37.03ms
+[2025-09-05 15:54:34] [Rank 0] step:9581/10000 train_time:354699ms step_avg:37.02ms
+[2025-09-05 15:54:34] [Rank 0] step:9601/10000 train_time:355358ms step_avg:37.01ms
+[2025-09-05 15:54:35] [Rank 0] step:9621/10000 train_time:356016ms step_avg:37.00ms
+[2025-09-05 15:54:36] [Rank 0] step:9641/10000 train_time:356675ms step_avg:37.00ms
+[2025-09-05 15:54:37] [Rank 0] step:9661/10000 train_time:357614ms step_avg:37.02ms
+[2025-09-05 15:54:37] [Rank 0] step:9681/10000 train_time:358273ms step_avg:37.01ms
+[2025-09-05 15:54:38] [Rank 0] step:9701/10000 train_time:358932ms step_avg:37.00ms
+[2025-09-05 15:54:39] [Rank 0] step:9721/10000 train_time:359589ms step_avg:36.99ms
+[2025-09-05 15:54:39] [Rank 0] step:9741/10000 train_time:360248ms step_avg:36.98ms
+[2025-09-05 15:54:40] [Rank 0] step:9761/10000 train_time:360906ms step_avg:36.97ms
+[2025-09-05 15:54:41] [Rank 0] step:9781/10000 train_time:361578ms step_avg:36.97ms
+[2025-09-05 15:54:41] [Rank 0] step:9801/10000 train_time:362237ms step_avg:36.96ms
+[2025-09-05 15:54:42] [Rank 0] step:9821/10000 train_time:362894ms step_avg:36.95ms
+[2025-09-05 15:54:42] [Rank 0] step:9841/10000 train_time:363553ms step_avg:36.94ms
+[2025-09-05 15:54:43] [Rank 0] step:9861/10000 train_time:364212ms step_avg:36.93ms
+[2025-09-05 15:54:44] [Rank 0] step:9881/10000 train_time:364869ms step_avg:36.93ms
+[2025-09-05 15:54:44] [Rank 0] step:9901/10000 train_time:365528ms step_avg:36.92ms
+[2025-09-05 15:54:45] [Rank 0] step:9921/10000 train_time:366187ms step_avg:36.91ms
+[2025-09-05 15:54:46] [Rank 0] step:9941/10000 train_time:366846ms step_avg:36.90ms
+[2025-09-05 15:54:46] [Rank 0] step:9961/10000 train_time:367504ms step_avg:36.89ms
+[2025-09-05 15:54:47] [Rank 0] step:9981/10000 train_time:368163ms step_avg:36.89ms
+[2025-09-05 15:54:48] [Rank 0] step:10000/10000 train_time:368788ms step_avg:36.88ms
+[2025-09-05 15:54:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:54:48] [Rank 0] PRINT: step:10000/10000 train_loss:0.6396 val_loss:0.6320 train_time:369061ms step_avg:36.91ms
+[2025-09-05 15:54:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:54:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:56:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:56:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:56:10] [Rank 0] Total Loss: 4.9927
+[2025-09-05 15:56:10] [Rank 0] Total FTA (Unweighted): 0.9156
+[2025-09-05 15:56:10] [Rank 0] Total FTA (Weighted): 0.9156
+[2025-09-05 15:56:10] [Rank 0] Group 0 Loss: 4.8971
+[2025-09-05 15:56:10] [Rank 0] Group 1 Loss: 4.5108
+[2025-09-05 15:56:10] [Rank 0] Group 2 Loss: 4.6196
+[2025-09-05 15:56:10] [Rank 0] Group 3 Loss: 4.8971
+[2025-09-05 15:56:10] [Rank 0] Group 4 Loss: 5.0271
+[2025-09-05 15:56:10] [Rank 0] Group 5 Loss: 4.9713
+[2025-09-05 15:56:10] [Rank 0] Group 6 Loss: 4.8493
+[2025-09-05 15:56:10] [Rank 0] Group 7 Loss: 4.9510
+[2025-09-05 15:56:10] [Rank 0] Group 8 Loss: 5.0638
+[2025-09-05 15:56:10] [Rank 0] Group 9 Loss: 5.0228
+[2025-09-05 15:56:10] [Rank 0] Group 10 Loss: 5.1288
+[2025-09-05 15:56:10] [Rank 0] Group 11 Loss: 5.0981
+[2025-09-05 15:56:10] [Rank 0] Group 12 Loss: 5.1597
+[2025-09-05 15:56:10] [Rank 0] Group 13 Loss: 5.1912
+[2025-09-05 15:56:10] [Rank 0] Group 14 Loss: 5.1786
+[2025-09-05 15:56:10] [Rank 0] Group 15 Loss: 5.3166
+[2025-09-05 15:56:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:56:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:56:10] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:56:10] [Rank 0] Group 3 FTA: 1.0000
0] Group 3 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 15:56:10] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 15:56:10] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 15:56:10] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-05 15:56:10] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-05 15:56:10] [Rank 0] Group 12 FTA: 0.9800 +[2025-09-05 15:56:10] [Rank 0] Group 12 FTA: 0.9800 +[2025-09-05 15:56:10] [Rank 0] Group 13 FTA: 0.8800 +[2025-09-05 15:56:10] [Rank 0] Group 13 FTA: 0.8800 +[2025-09-05 15:56:10] [Rank 0] Group 14 FTA: 0.5900 +[2025-09-05 15:56:10] [Rank 0] Group 14 FTA: 0.5900 +[2025-09-05 15:56:10] [Rank 0] Group 15 FTA: 0.2200 +[2025-09-05 15:56:10] [Rank 0] Group 15 FTA: 0.2200 +[2025-09-05 15:56:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:56:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_loss_curves.png +[2025-09-05 15:56:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:56:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/per_class_acc_curves.png +[2025-09-05 15:56:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:56:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_loss_curve.png +[2025-09-05 15:56:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:56:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_42/total_acc_curve.png +[2025-09-05 15:56:11] [Rank 0] step:10001/10000 train_time:369070ms step_avg:36.90ms +[2025-09-05 15:56:11] [Rank 0] step:10001/10000 train_time:369070ms step_avg:36.90ms +[2025-09-05 15:56:11] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 15:56:11 2025 --- +[2025-09-05 15:56:11] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 15:56:11 2025 --- +[2025-09-05 15:56:11] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB +[2025-09-05 15:56:11] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..5e5abddc9bf70670b7daf4bfbeca5a2a0f8e9858 --- /dev/null +++ 
b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.005, + "base_dir": "logs_qa_adam_gated/lr_search_long", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "98d74260-2786-4ab0-9d22-0824c6f3fa15", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 
121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 
1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 
850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 
756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..19ec9cc32d40c71b452a5c899ad495655aa0bdad --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9444861ab7c4d4de8fe54429b46b59fcfd185b5877a5bba0b349e70e61c61584 +size 381096 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..4b03657db493e8bc43c32fbc3ad545d7d047dcfd --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5867fee39b7fa28763de09bdee9f95e7b055c47f9b23a5cd19b6d3c04e68842c +size 448671 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..36fda008b544d58dbc4475d3f5bf02aee604e403 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89fb3936c61f14477c9da6d24543852c48abc4487c75ba0df3753ddf1f0bc2c2 +size 98036 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..c2b044ed0724523db6a5eb3f1a6ecade976db12c --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:752be8ce4570d3aab1a11cebc7e036efc0c9ecb5f501ff14622b0a53b167f493 +size 112505 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/training_log_98d74260-2786-4ab0-9d22-0824c6f3fa15.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/training_log_98d74260-2786-4ab0-9d22-0824c6f3fa15.txt new file mode 100644 index 0000000000000000000000000000000000000000..8502e171cdc596fce287141925e8bc6242834b7d --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/training_log_98d74260-2786-4ab0-9d22-0824c6f3fa15.txt @@ -0,0 +1,5614 @@ +[2025-09-05 18:19:19] 
[Rank 0] PRINT: --- Script Start: Fri Sep 5 18:19:19 2025 ---
+[2025-09-05 18:19:19] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.005, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 18:19:19] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 18:19:19] [Rank 0] PRINT: Using fixed seed: 43
+[2025-09-05 18:19:19] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43
+[2025-09-05 18:19:19] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
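+    # Shard layout implied by the asserts below: a 256-int32 header in which
+    # header[0] is a magic number (20240520), header[1] a format version (1),
+    # and header[2] the token count; the tokens themselves follow as uint16
+    # values starting at byte offset 256 * 4 (hence the 2 * num_tokens size check).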
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle over the shards indefinitely, so multi-epoch training simply wraps around
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
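+                         # the remaining modes, summarized from the dispatch logic further down
+                         "9: SGD+momentum on all parameters (uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QK+V Attn); "
+                         "13: Muon(W_O, W_2)/Adam(QK+V Attn, W_1); "
+                         "14: Muon(W_O)/Adam(rest); "
+                         "15: Muon(W_V)/Adam(rest); "
+                         "16: Muon(QKV Attn)/Adam(W_O, MLP)."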
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
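+
+# A quick sanity check of the counts produced by generate_powerlaw_selection_counts
+# (defined just below); worked through for m=3 purely as an illustration:
+#   group 0: 1 class   x 2^(3-0) = 8 samples
+#   group 1: 1 class   x 2^(3-1) = 4 samples
+#   group 2: 2 classes x 2^(3-2) = 2 samples each
+#   group 3: 4 classes x 2^(3-3) = 1 sample each
+# i.e. group g >= 1 holds 2^(g-1) classes with 2^(m-g) samples per class, so every
+# group past group 0 contributes the same total sample mass, 2^(m-1).
+# ...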
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O.
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O Attn
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V Attn
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam weight decay is wanted
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # read exp_args.muon_lr directly; a bare muon_lr name is only bound in the "qkvo" branch
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
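+
+    # Sketch of the flatten/dedupe idiom used above, written out as a standalone
+    # helper for readability (illustrative only; the run keeps the inline loops):
+    def _flatten_unique_sketch(params):
+        out, seen = [], set()
+        for sub in params:
+            for p in (sub if isinstance(sub, list) else [sub]):
+                if p is not None and id(p) not in seen:
+                    out.append(p)
+                    seen.add(id(p))
+        return out
+
+    print0(f"PRINT: Optimizers configured. 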
Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable, then linear decay over the final cooldown_frac
+# of training. e.g. with num_iterations=10000 and cooldown_frac=0.8, steps
+# 0-1999 return 1.0, then the multiplier decays linearly to 0.1 at step 10000.
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: the original `assert 0 <= x < 1` fails on the final step
+    # (step == num_iterations), so clamp x into [0, 1] instead ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS): the window grows linearly with
+# training progress, e.g. x=0.5 gives next_multiple_of_n(864, n=128) = 896
+# tokens (7 blocks) and x=1.0 gives 1792 tokens (14 blocks).
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the eager 'model'; 'model_compiled' only exists after this assignment.
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 18:19:19] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+    assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle enables multi-epoch training over the shards
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn, MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn, MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: pure SGD+Momentum; "
+                         "10: Muon(O Attn, MLP)/Adam(QK Attn, V Attn); "
+                         "13: Muon(W_O, W_2 MLP)/Adam(QK Attn, V Attn, W_1 MLP); "
+                         "14: Muon(W_O)/Adam(QK Attn, V Attn, MLP); "
+                         "15: Muon(W_V)/Adam(QK Attn, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_GPT_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT variant whose CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_GPT_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 491520
+    train_seq_len = 3*1024
+    val_seq_len = 4*4*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but add optimizer_mode to the log file name)
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir) # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the timestamped message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
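+
+# Usage sketch (illustrative): only the master process writes anything, e.g.
+#   print0("PRINT: hello", console=True)
+# echoes the message with its "PRINT:" prefix dropped to stdout and appends
+# "[<timestamp>] [Rank 0] PRINT: hello" to the logfile.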
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+# e.g. m=3 gives selection_counts={0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+# and class_groups=[0, 1, 2, 2, 3, 3, 3, 3]: group g holds 2**(g-1) classes
+# (one class for g=0) with 2**(m-g) samples each.
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce sampling jitter).", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
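+
+    # Format check (illustrative; mirrors the regex used in the loop below):
+    # each eval item is expected to look like
+    #   {"class_id": <int>, "text": "<question>? Answer: <answer>"}
+    _demo_match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', "Where was X born? Answer: Paris")
+    assert _demo_match is not None and _demo_match.group(2) == "Paris"
+
+    # 3. 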
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+
+    # Two methods for calculating total accuracy
+    total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples
+    total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy
+        'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups
+        'total_acc': total_acc_unweighted # Primarily use simple average method
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
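+# Worked example (illustrative numbers): with one group scoring 90/100 and
+# another 1/10, the sample-weighted total FTA is 91/110 ≈ 0.827 while the
+# unweighted per-group mean is (0.90 + 0.10) / 2 = 0.50; the training loop
+# records the unweighted variant in its history.
+
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 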
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
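# EDITORIAL NOTE (not part of the diff above): the long if/elif chain maps each
# optimizer_mode to a (Muon, Adam) split over the same five matrix groups, so it can
# be expressed as a lookup table. A minimal sketch, assuming a `groups` dict built
# from the lists above (attn_qk_group, attn_v_params, attn_o_params, mlp_w1_group,
# mlp_w2_group); the mode-to-split mapping is copied from the chain itself:
def split_for_mode(mode, groups):
    # groups: {"qk": [...], "v": [...], "o": [...], "w1": [...], "w2": [...]}
    table = {
        0:  (["qk", "v", "o", "w1", "w2"], []),
        1:  (["qk"], ["v", "o", "w1", "w2"]),
        2:  (["v", "o"], ["qk", "w1", "w2"]),
        3:  (["qk", "v", "o"], ["w1", "w2"]),
        4:  (["w1", "w2"], ["qk", "v", "o"]),
        5:  ([], ["qk", "v", "o", "w1", "w2"]),
        6:  (["w2"], ["qk", "v", "o", "w1"]),
        7:  (["v", "o", "w1", "w2"], ["qk"]),
        8:  (["v", "o", "w2"], ["qk", "w1"]),
        10: (["o", "w1", "w2"], ["qk", "v"]),
        13: (["o", "w2"], ["qk", "v", "w1"]),
        14: (["o"], ["qk", "v", "w1", "w2"]),
        15: (["v"], ["qk", "o", "w1", "w2"]),
        16: (["qk", "v"], ["o", "w1", "w2"]),
    }
    muon_keys, adam_keys = table[mode]  # KeyError plays the role of the ValueError above
    muon_params = [p for k in muon_keys for p in groups[k]]
    adam_params = [p for k in adam_keys for p in groups[k]]
    return muon_params, adam_params
# Mode 9 (pure SGD) stays a special case; everything else becomes one table row,
# which also makes it harder for a print label to drift out of sync with the split.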
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr # NOTE: was only set in the "qkvo" branch; without it the Muon setup below hits a NameError
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
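# EDITORIAL NOTE (not part of the diff above): this "gated" branch duplicates the
# "qkvo" branch verbatim except for the extra c_up projection folded into W_1. A
# minimal sketch of a shared collector, assuming the same Block / CausalSelfAttention
# attributes the script already relies on (q_w, k_w, v_w, c_proj, c_fc, c_up):
def collect_matrix_groups(model, gated: bool):
    groups = {"qk": [], "v": [], "o": [], "w1": [], "w2": []}
    for block in model.blocks:
        if block.attn is not None:
            groups["qk"] += [block.attn.q_w, block.attn.k_w]
            groups["v"].append(block.attn.v_w)
            groups["o"].append(block.attn.c_proj.weight)
        if block.mlp is not None:
            groups["w1"].append(block.mlp.c_fc.weight)
            if gated:
                groups["w1"].append(block.mlp.c_up.weight)  # gated MLPs carry an extra up-projection
            groups["w2"].append(block.mlp.c_proj.weight)
    return groups
# Usage sketch: groups = collect_matrix_groups(model, exp_args.model_parameterization == "gated"),
# then feed `groups` to a mode table as in the earlier note; this would also have caught
# the muon_lr NameError fixed above, since both branches would share one code path.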
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 18:19:19] [Rank 0] PRINT: Constructing model...
+[2025-09-05 18:19:21] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 18:19:21] [Rank 0] PRINT: Model constructed and broadcasted.
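# EDITORIAL NOTE (not part of the diff above): with this run's settings
# (num_iterations=10000, cooldown_frac=0.8) the get_lr() schedule defined in the
# script above holds the full LR for the first 2000 steps and then decays linearly
# to 10% of it at the final step. A self-contained check of that arithmetic:
def lr_mult(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped progress, as in get_lr
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / cooldown_frac  # 1.0 at the start of cooldown, 0.0 at the end
    return w * 1.0 + (1 - w) * 0.1

for s in (0, 1999, 2000, 6000, 10000):
    print(s, round(lr_mult(s), 4))  # -> 1.0, 1.0, 1.0, 0.55, 0.1
# The Muon momentum warmup in the training loop is the same shape in reverse:
# (1 - frac) * 0.85 + frac * 0.95 with frac = min(step / 300, 1), i.e. 0.85 -> 0.95
# over the first 300 steps (it is a no-op in this run, since mode 5 creates no Muon).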
+[2025-09-05 18:19:21] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 18:19:25] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 18:19:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 18:19:25] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 18:19:25] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 18:19:25] [Rank 0] PRINT: Model returns:
+[2025-09-05 18:19:25] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 18:19:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 18:19:25] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005).
+[2025-09-05 18:19:25] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 18:19:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 18:19:25] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 18:19:30] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 18:19:30] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 18:20:10] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 18:20:10] [Rank 0] PRINT: Starting training...
+[2025-09-05 18:20:16] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/fixed_eval_indices.json
+[2025-09-05 18:20:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
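# EDITORIAL NOTE (not part of the log): the "Fixed-eval set loaded with 1600 samples"
# messages further down are consistent with per_group_k=100 and the 16 groups (0-15)
# reported in the evaluations below. A minimal sanity check, assuming it is run next to
# the fixed_eval_indices.json written above (the file maps group id -> list of jsonl
# line indices, as in the version committed in this diff):
import json

with open("fixed_eval_indices.json") as f:
    fixed = json.load(f)
print(len(fixed), sum(len(v) for v in fixed.values()))  # expect: 16 1600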
+[2025-09-05 18:20:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 18:20:54] [Rank 0] step:21/10000 train_time:33575ms step_avg:1598.82ms
+[2025-09-05 18:20:54] [Rank 0] step:41/10000 train_time:34224ms step_avg:834.74ms
+[2025-09-05 18:20:55] [Rank 0] step:61/10000 train_time:34872ms step_avg:571.68ms
+[2025-09-05 18:20:56] [Rank 0] step:81/10000 train_time:35698ms step_avg:440.72ms
+[2025-09-05 18:20:57] [Rank 0] step:101/10000 train_time:36346ms step_avg:359.86ms
+[2025-09-05 18:20:57] [Rank 0] step:121/10000 train_time:36993ms step_avg:305.73ms
+[2025-09-05 18:20:58] [Rank 0] step:141/10000 train_time:37641ms step_avg:266.96ms
+[2025-09-05 18:20:59] [Rank 0] step:161/10000 train_time:38479ms step_avg:239.00ms
+[2025-09-05 18:20:59] [Rank 0] step:181/10000 train_time:39132ms step_avg:216.20ms
+[2025-09-05 18:21:00] [Rank 0] step:201/10000 train_time:39779ms step_avg:197.91ms
+[2025-09-05 18:21:01] [Rank 0] step:221/10000 train_time:40426ms step_avg:182.92ms
+[2025-09-05 18:21:01] [Rank 0] step:241/10000 train_time:41074ms step_avg:170.43ms
+[2025-09-05 18:21:02] [Rank 0] step:261/10000 train_time:41722ms step_avg:159.85ms
+[2025-09-05 18:21:03] [Rank 0] step:281/10000 train_time:42369ms step_avg:150.78ms
+[2025-09-05 18:21:03] [Rank 0] step:301/10000 train_time:43018ms step_avg:142.92ms
+[2025-09-05 18:21:04] [Rank 0] step:321/10000 train_time:43665ms step_avg:136.03ms
+[2025-09-05 18:21:05] [Rank 0] step:341/10000 train_time:44313ms step_avg:129.95ms
+[2025-09-05 18:21:05] [Rank 0] step:361/10000 train_time:44960ms step_avg:124.54ms
+[2025-09-05 18:21:06] [Rank 0] step:381/10000 train_time:45607ms step_avg:119.70ms
+[2025-09-05 18:21:07] [Rank 0] step:401/10000 train_time:46254ms step_avg:115.35ms
+[2025-09-05 18:21:07] [Rank 0] step:421/10000 train_time:46902ms step_avg:111.41ms
+[2025-09-05 18:21:08] [Rank 0] step:441/10000 train_time:47549ms step_avg:107.82ms
+[2025-09-05 18:21:08] [Rank 0] step:461/10000 train_time:48197ms step_avg:104.55ms
+[2025-09-05 18:21:09] [Rank 0] step:481/10000 train_time:48844ms step_avg:101.55ms
+[2025-09-05 18:21:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:21:10] [Rank 0] PRINT: step:500/10000 train_loss:3.5023 val_loss:1.4004 train_time:49722ms step_avg:99.44ms
+[2025-09-05 18:21:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:21:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:22:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:22:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:22:32] [Rank 0] Total Loss: 4.5020
+[2025-09-05 18:22:32] [Rank 0] Total FTA (Unweighted): 0.3500
+[2025-09-05 18:22:32] [Rank 0] Total FTA (Weighted): 0.3500
+[2025-09-05 18:22:32] [Rank 0] Group 0 Loss: 3.9275
+[2025-09-05 18:22:32] [Rank 0] Group 1 Loss: 3.6039
+[2025-09-05 18:22:32] [Rank 0] Group 2 Loss: 3.4680
+[2025-09-05 18:22:32] [Rank 0] Group 3 Loss: 3.7977
+[2025-09-05 18:22:32] [Rank 0] Group 4 Loss: 3.9453
+[2025-09-05 18:22:32] [Rank 0] Group 5 Loss: 4.1559
+[2025-09-05 18:22:32] [Rank 0] Group 6 Loss: 4.2710
+[2025-09-05 18:22:32] [Rank 0] Group 7 Loss: 4.4508
+[2025-09-05 18:22:32] [Rank 0] Group 8 Loss: 4.7658
+[2025-09-05 18:22:32] [Rank 0] Group 9 Loss: 4.9340
+[2025-09-05 18:22:32] [Rank 0] Group 10 Loss: 5.0430
+[2025-09-05 18:22:32] [Rank 0] Group 11 Loss: 5.1330
+[2025-09-05 18:22:32] [Rank 0] Group 12 Loss: 5.1123
+[2025-09-05 18:22:32] [Rank 0] Group 13 Loss: 5.1592
+[2025-09-05 18:22:32] [Rank 0] Group 14 Loss: 5.1812
+[2025-09-05 18:22:32] [Rank 0] Group 15 Loss: 5.0830
+[2025-09-05 18:22:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:22:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:22:32] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:22:32] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:22:32] [Rank 0] Group 4 FTA: 0.3500
+[2025-09-05 18:22:32] [Rank 0] Group 5 FTA: 0.2600
+[2025-09-05 18:22:32] [Rank 0] Group 6 FTA: 0.1800
+[2025-09-05 18:22:32] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-05 18:22:32] [Rank 0] Group 8 FTA: 0.1000
+[2025-09-05 18:22:32] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-05 18:22:32] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 18:22:32] [Rank 0] Group 11 FTA: 0.0700
+[2025-09-05 18:22:32] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 18:22:32] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 18:22:32] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 18:22:32] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 18:22:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:22:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:22:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:22:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:22:33] [Rank 0] step:501/10000 train_time:49732ms step_avg:99.26ms
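# EDITORIAL NOTE (not part of the log): one consistent reading of the identical
# "Unweighted" and "Weighted" FTA totals above is that the fixed eval set gives every
# group the same sample count (per_group_k=100, 16 groups, 1600 samples), in which
# case the per-sample mean and the mean of per-group accuracies must coincide. A
# sketch using the step-500 group accuracies from this log, assuming 100 samples per
# group (the exact definition of "weighted" lives in run_detailed_evaluation, not shown):
accs = [1.00, 1.00, 1.00, 1.00, 0.35, 0.26, 0.18, 0.12,
        0.10, 0.07, 0.08, 0.07, 0.08, 0.10, 0.11, 0.08]
unweighted = sum(accs) / len(accs)
weighted = sum(a * 100 for a in accs) / (100 * len(accs))  # identical when counts are equal
print(round(unweighted, 4), round(weighted, 4))  # -> 0.35 0.35, matching the log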
+[2025-09-05 18:22:34] [Rank 0] step:521/10000 train_time:50154ms step_avg:96.27ms
+[2025-09-05 18:22:34] [Rank 0] step:541/10000 train_time:50800ms step_avg:93.90ms
+[2025-09-05 18:22:35] [Rank 0] step:561/10000 train_time:51446ms step_avg:91.70ms
+[2025-09-05 18:22:36] [Rank 0] step:581/10000 train_time:52092ms step_avg:89.66ms
+[2025-09-05 18:22:36] [Rank 0] step:601/10000 train_time:52739ms step_avg:87.75ms
+[2025-09-05 18:22:37] [Rank 0] step:621/10000 train_time:53385ms step_avg:85.97ms
+[2025-09-05 18:22:38] [Rank 0] step:641/10000 train_time:54035ms step_avg:84.30ms
+[2025-09-05 18:22:38] [Rank 0] step:661/10000 train_time:54682ms step_avg:82.73ms
+[2025-09-05 18:22:39] [Rank 0] step:681/10000 train_time:55431ms step_avg:81.40ms
+[2025-09-05 18:22:40] [Rank 0] step:701/10000 train_time:56078ms step_avg:80.00ms
+[2025-09-05 18:22:40] [Rank 0] step:721/10000 train_time:56725ms step_avg:78.68ms
+[2025-09-05 18:22:41] [Rank 0] step:741/10000 train_time:57372ms step_avg:77.42ms
+[2025-09-05 18:22:42] [Rank 0] step:761/10000 train_time:58022ms step_avg:76.24ms
+[2025-09-05 18:22:42] [Rank 0] step:781/10000 train_time:58673ms step_avg:75.13ms
+[2025-09-05 18:22:43] [Rank 0] step:801/10000 train_time:59326ms step_avg:74.06ms
+[2025-09-05 18:22:44] [Rank 0] step:821/10000 train_time:60520ms step_avg:73.71ms
+[2025-09-05 18:22:45] [Rank 0] step:841/10000 train_time:61107ms step_avg:72.66ms
+[2025-09-05 18:22:45] [Rank 0] step:861/10000 train_time:61759ms step_avg:71.73ms
+[2025-09-05 18:22:46] [Rank 0] step:881/10000 train_time:62411ms step_avg:70.84ms
+[2025-09-05 18:22:47] [Rank 0] step:901/10000 train_time:63063ms step_avg:69.99ms
+[2025-09-05 18:22:47] [Rank 0] step:921/10000 train_time:63714ms step_avg:69.18ms
+[2025-09-05 18:22:48] [Rank 0] step:941/10000 train_time:64366ms step_avg:68.40ms
+[2025-09-05 18:22:49] [Rank 0] step:961/10000 train_time:65018ms step_avg:67.66ms
+[2025-09-05 18:22:49] [Rank 0] step:981/10000 train_time:65670ms step_avg:66.94ms
+[2025-09-05 18:22:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:22:50] [Rank 0] PRINT: step:1000/10000 train_loss:1.1261 val_loss:0.9692 train_time:66554ms step_avg:66.55ms
+[2025-09-05 18:22:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:22:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:24:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:24:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:24:12] [Rank 0] Total Loss: 4.8394
+[2025-09-05 18:24:12] [Rank 0] Total FTA (Unweighted): 0.5575
+[2025-09-05 18:24:12] [Rank 0] Total FTA (Weighted): 0.5575
+[2025-09-05 18:24:12] [Rank 0] Group 0 Loss: 4.5792
+[2025-09-05 18:24:12] [Rank 0] Group 1 Loss: 4.2604
+[2025-09-05 18:24:12] [Rank 0] Group 2 Loss: 4.1413
+[2025-09-05 18:24:12] [Rank 0] Group 3 Loss: 4.5461
+[2025-09-05 18:24:12] [Rank 0] Group 4 Loss: 4.5472
+[2025-09-05 18:24:12] [Rank 0] Group 5 Loss: 4.5705
+[2025-09-05 18:24:12] [Rank 0] Group 6 Loss: 4.5261
+[2025-09-05 18:24:12] [Rank 0] Group 7 Loss: 4.6459
+[2025-09-05 18:24:12] [Rank 0] Group 8 Loss: 4.7595
+[2025-09-05 18:24:12] [Rank 0] Group 9 Loss: 4.8894
+[2025-09-05 18:24:12] [Rank 0] Group 10 Loss: 5.0509
+[2025-09-05 18:24:12] [Rank 0] Group 11 Loss: 5.2022
+[2025-09-05 18:24:12] [Rank 0] Group 12 Loss: 5.3395
+[2025-09-05 18:24:12] [Rank 0] Group 13 Loss: 5.4884
+[2025-09-05 18:24:12] [Rank 0] Group 14 Loss: 5.4393
+[2025-09-05 18:24:12] [Rank 0] Group 15 Loss: 5.4449
+[2025-09-05 18:24:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:24:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:24:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:24:12] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:24:12] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:24:12] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:24:12] [Rank 0] Group 6 FTA: 0.9600
+[2025-09-05 18:24:12] [Rank 0] Group 7 FTA: 0.7700
+[2025-09-05 18:24:12] [Rank 0] Group 8 FTA: 0.3900
+[2025-09-05 18:24:12] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 18:24:12] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-05 18:24:12] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 18:24:12] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 18:24:12] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 18:24:12] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 18:24:12] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 18:24:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:24:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:24:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:24:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:24:14] [Rank 0] step:1001/10000 train_time:66563ms step_avg:66.50ms
+[2025-09-05 18:24:14] [Rank 0] step:1021/10000 train_time:67009ms step_avg:65.63ms
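# EDITORIAL NOTE (not part of the log): the step_avg printed above is just the
# cumulative train_time divided by the step count, which makes these lines easy to
# cross-check or re-plot. A minimal parser, using one line from this log:
import re

line = "[2025-09-05 18:24:14] [Rank 0] step:1021/10000 train_time:67009ms step_avg:65.63ms"
m = re.search(r"step:(\d+)/\d+ train_time:(\d+)ms", line)
step, ms = int(m.group(1)), int(m.group(2))
print(f"{ms / step:.2f}ms")  # -> 65.63ms, matching the logged step_avg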
+[2025-09-05 18:24:15] [Rank 0] step:1041/10000 train_time:67662ms step_avg:65.00ms
+[2025-09-05 18:24:16] [Rank 0] step:1061/10000 train_time:68313ms step_avg:64.39ms
+[2025-09-05 18:24:16] [Rank 0] step:1081/10000 train_time:68965ms step_avg:63.80ms
+[2025-09-05 18:24:17] [Rank 0] step:1101/10000 train_time:69617ms step_avg:63.23ms
+[2025-09-05 18:24:18] [Rank 0] step:1121/10000 train_time:70269ms step_avg:62.68ms
+[2025-09-05 18:24:18] [Rank 0] step:1141/10000 train_time:70920ms step_avg:62.16ms
+[2025-09-05 18:24:19] [Rank 0] step:1161/10000 train_time:71572ms step_avg:61.65ms
+[2025-09-05 18:24:20] [Rank 0] step:1181/10000 train_time:72225ms step_avg:61.16ms
+[2025-09-05 18:24:20] [Rank 0] step:1201/10000 train_time:72877ms step_avg:60.68ms
+[2025-09-05 18:24:21] [Rank 0] step:1221/10000 train_time:73529ms step_avg:60.22ms
+[2025-09-05 18:24:22] [Rank 0] step:1241/10000 train_time:74180ms step_avg:59.77ms
+[2025-09-05 18:24:22] [Rank 0] step:1261/10000 train_time:74833ms step_avg:59.34ms
+[2025-09-05 18:24:23] [Rank 0] step:1281/10000 train_time:75485ms step_avg:58.93ms
+[2025-09-05 18:24:23] [Rank 0] step:1301/10000 train_time:76137ms step_avg:58.52ms
+[2025-09-05 18:24:24] [Rank 0] step:1321/10000 train_time:76789ms step_avg:58.13ms
+[2025-09-05 18:24:25] [Rank 0] step:1341/10000 train_time:77441ms step_avg:57.75ms
+[2025-09-05 18:24:25] [Rank 0] step:1361/10000 train_time:78093ms step_avg:57.38ms
+[2025-09-05 18:24:26] [Rank 0] step:1381/10000 train_time:78746ms step_avg:57.02ms
+[2025-09-05 18:24:27] [Rank 0] step:1401/10000 train_time:79398ms step_avg:56.67ms
+[2025-09-05 18:24:27] [Rank 0] step:1421/10000 train_time:80050ms step_avg:56.33ms
+[2025-09-05 18:24:28] [Rank 0] step:1441/10000 train_time:80704ms step_avg:56.01ms
+[2025-09-05 18:24:29] [Rank 0] step:1461/10000 train_time:81354ms step_avg:55.68ms
+[2025-09-05 18:24:29] [Rank 0] step:1481/10000 train_time:82007ms step_avg:55.37ms
+[2025-09-05 18:24:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:24:30] [Rank 0] PRINT: step:1500/10000 train_loss:0.9210 val_loss:0.8713 train_time:82892ms step_avg:55.26ms
+[2025-09-05 18:24:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:24:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:25:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:25:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:25:52] [Rank 0] Total Loss: 5.0166
+[2025-09-05 18:25:52] [Rank 0] Total FTA (Unweighted): 0.6425
+[2025-09-05 18:25:52] [Rank 0] Total FTA (Weighted): 0.6425
+[2025-09-05 18:25:52] [Rank 0] Group 0 Loss: 4.8947
+[2025-09-05 18:25:52] [Rank 0] Group 1 Loss: 4.4325
+[2025-09-05 18:25:52] [Rank 0] Group 2 Loss: 4.3726
+[2025-09-05 18:25:52] [Rank 0] Group 3 Loss: 4.8467
+[2025-09-05 18:25:52] [Rank 0] Group 4 Loss: 4.8348
+[2025-09-05 18:25:52] [Rank 0] Group 5 Loss: 4.7229
+[2025-09-05 18:25:52] [Rank 0] Group 6 Loss: 4.6907
+[2025-09-05 18:25:52] [Rank 0] Group 7 Loss: 4.8520
+[2025-09-05 18:25:52] [Rank 0] Group 8 Loss: 4.9145
+[2025-09-05 18:25:52] [Rank 0] Group 9 Loss: 4.9505
+[2025-09-05 18:25:52] [Rank 0] Group 10 Loss: 5.1434
+[2025-09-05 18:25:52] [Rank 0] Group 11 Loss: 5.2386
+[2025-09-05 18:25:52] [Rank 0] Group 12 Loss: 5.3966
+[2025-09-05 18:25:52] [Rank 0] Group 13 Loss: 5.5903
+[2025-09-05 18:25:52] [Rank 0] Group 14 Loss: 5.6612
+[2025-09-05 18:25:52] [Rank 0] Group 15 Loss: 5.7238
+[2025-09-05 18:25:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:25:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:25:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:25:52] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:25:52] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:25:52] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:25:52] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:25:52] [Rank 0] Group 7 FTA: 0.9700
+[2025-09-05 18:25:52] [Rank 0] Group 8 FTA: 0.9000
+[2025-09-05 18:25:52] [Rank 0] Group 9 FTA: 0.5500
+[2025-09-05 18:25:52] [Rank 0] Group 10 FTA: 0.3000
+[2025-09-05 18:25:52] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 18:25:52] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 18:25:52] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 18:25:52] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 18:25:52] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 18:25:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:25:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:25:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:25:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:25:53] [Rank 0] step:1501/10000 train_time:82901ms step_avg:55.23ms
+[2025-09-05 18:25:54] [Rank 0] step:1521/10000 train_time:83327ms step_avg:54.78ms
+[2025-09-05 18:25:54] [Rank 0] step:1541/10000 train_time:83979ms step_avg:54.50ms
+[2025-09-05 18:25:55] [Rank 0] step:1561/10000 train_time:84633ms step_avg:54.22ms
+[2025-09-05 18:25:56] [Rank 0] step:1581/10000 train_time:85283ms step_avg:53.94ms
+[2025-09-05 18:25:56] [Rank 0] step:1601/10000 train_time:85935ms step_avg:53.68ms
+[2025-09-05 18:25:57] [Rank 0] step:1621/10000 train_time:86587ms step_avg:53.42ms
+[2025-09-05 18:25:58] [Rank 0] step:1641/10000 train_time:87420ms step_avg:53.27ms
+[2025-09-05 18:25:59] [Rank 0] step:1661/10000 train_time:88073ms step_avg:53.02ms
+[2025-09-05 18:25:59] [Rank 0] step:1681/10000 train_time:88724ms step_avg:52.78ms
+[2025-09-05 18:26:00] [Rank 0] step:1701/10000 train_time:89376ms step_avg:52.54ms
+[2025-09-05 18:26:00] [Rank 0] step:1721/10000 train_time:90027ms step_avg:52.31ms
+[2025-09-05 18:26:01] [Rank 0] step:1741/10000 train_time:90679ms step_avg:52.08ms
+[2025-09-05 18:26:02] [Rank 0] step:1761/10000 train_time:91331ms step_avg:51.86ms
+[2025-09-05 18:26:02] [Rank 0] step:1781/10000 train_time:91983ms step_avg:51.65ms
+[2025-09-05 18:26:03] [Rank 0] step:1801/10000 train_time:92637ms step_avg:51.44ms
+[2025-09-05 18:26:04] [Rank 0] step:1821/10000 train_time:93287ms step_avg:51.23ms
+[2025-09-05 18:26:04] [Rank 0] step:1841/10000 train_time:93938ms step_avg:51.03ms
+[2025-09-05 18:26:05] [Rank 0] step:1861/10000 train_time:94590ms step_avg:50.83ms
+[2025-09-05 18:26:06] [Rank 0] step:1881/10000 train_time:95242ms step_avg:50.63ms
+[2025-09-05 18:26:06] [Rank 0] step:1901/10000 train_time:95894ms step_avg:50.44ms
+[2025-09-05 18:26:07] [Rank 0] step:1921/10000 train_time:96547ms step_avg:50.26ms
+[2025-09-05 18:26:08] [Rank 0] step:1941/10000 train_time:97199ms step_avg:50.08ms
+[2025-09-05 18:26:08] [Rank 0] step:1961/10000 train_time:97851ms step_avg:49.90ms
+[2025-09-05 18:26:09] [Rank 0] 
step:1981/10000 train_time:98503ms step_avg:49.72ms +[2025-09-05 18:26:09] [Rank 0] step:1981/10000 train_time:98503ms step_avg:49.72ms +[2025-09-05 18:26:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:26:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:26:10] [Rank 0] PRINT: step:2000/10000 train_loss:0.8499 val_loss:0.8134 train_time:99419ms step_avg:49.71ms +[2025-09-05 18:26:10] [Rank 0] PRINT: step:2000/10000 train_loss:0.8499 val_loss:0.8134 train_time:99419ms step_avg:49.71ms +[2025-09-05 18:26:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:26:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:26:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:26:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:27:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:27:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:27:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:27:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:27:31] [Rank 0] Total Loss: 5.0967 +[2025-09-05 18:27:31] [Rank 0] Total Loss: 5.0967 +[2025-09-05 18:27:31] [Rank 0] Total FTA (Unweighted): 0.6938 +[2025-09-05 18:27:31] [Rank 0] Total FTA (Unweighted): 0.6938 +[2025-09-05 18:27:31] [Rank 0] Total FTA (Weighted): 0.6937 +[2025-09-05 18:27:31] [Rank 0] Total FTA (Weighted): 0.6937 +[2025-09-05 18:27:31] [Rank 0] Group 0 Loss: 4.9653 +[2025-09-05 18:27:31] [Rank 0] Group 0 Loss: 4.9653 +[2025-09-05 18:27:31] [Rank 0] Group 1 Loss: 4.5605 +[2025-09-05 18:27:31] [Rank 0] Group 1 Loss: 4.5605 +[2025-09-05 18:27:31] [Rank 0] Group 2 Loss: 4.5188 +[2025-09-05 18:27:31] [Rank 0] Group 2 Loss: 4.5188 +[2025-09-05 18:27:31] [Rank 0] Group 3 Loss: 4.9386 +[2025-09-05 18:27:31] [Rank 0] Group 3 Loss: 4.9386 +[2025-09-05 18:27:31] [Rank 0] Group 4 Loss: 4.9747 +[2025-09-05 18:27:31] [Rank 0] Group 4 Loss: 4.9747 +[2025-09-05 18:27:31] [Rank 0] Group 5 Loss: 4.8812 +[2025-09-05 18:27:31] [Rank 0] Group 5 Loss: 4.8812 +[2025-09-05 18:27:31] [Rank 0] Group 6 Loss: 4.8066 +[2025-09-05 18:27:31] [Rank 0] Group 6 Loss: 4.8066 +[2025-09-05 18:27:31] [Rank 0] Group 7 Loss: 4.9825 +[2025-09-05 18:27:31] [Rank 0] Group 7 Loss: 4.9825 +[2025-09-05 18:27:31] [Rank 0] Group 8 Loss: 5.0695 +[2025-09-05 18:27:31] [Rank 0] Group 8 Loss: 5.0695 +[2025-09-05 18:27:31] [Rank 0] Group 9 Loss: 5.0192 +[2025-09-05 18:27:31] [Rank 0] Group 9 Loss: 5.0192 +[2025-09-05 18:27:31] [Rank 0] Group 10 Loss: 5.2269 +[2025-09-05 18:27:31] [Rank 0] Group 10 Loss: 5.2269 +[2025-09-05 18:27:31] [Rank 0] Group 11 Loss: 5.2495 +[2025-09-05 18:27:31] [Rank 0] Group 11 Loss: 5.2495 +[2025-09-05 18:27:31] [Rank 0] Group 12 Loss: 5.3814 +[2025-09-05 18:27:31] [Rank 0] Group 12 Loss: 5.3814 +[2025-09-05 18:27:31] [Rank 0] Group 13 Loss: 5.5648 +[2025-09-05 18:27:31] [Rank 0] Group 13 Loss: 5.5648 +[2025-09-05 18:27:31] [Rank 0] Group 14 Loss: 5.6286 +[2025-09-05 18:27:31] [Rank 0] Group 14 Loss: 5.6286 +[2025-09-05 18:27:31] [Rank 0] Group 15 Loss: 5.7790 +[2025-09-05 18:27:31] [Rank 0] Group 15 Loss: 5.7790 +[2025-09-05 18:27:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] 
Group 1 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 4 FTA: 0.9900 +[2025-09-05 18:27:31] [Rank 0] Group 4 FTA: 0.9900 +[2025-09-05 18:27:31] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:27:31] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 18:27:31] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 18:27:31] [Rank 0] Group 8 FTA: 0.9800 +[2025-09-05 18:27:31] [Rank 0] Group 8 FTA: 0.9800 +[2025-09-05 18:27:31] [Rank 0] Group 9 FTA: 0.8200 +[2025-09-05 18:27:31] [Rank 0] Group 9 FTA: 0.8200 +[2025-09-05 18:27:31] [Rank 0] Group 10 FTA: 0.6100 +[2025-09-05 18:27:31] [Rank 0] Group 10 FTA: 0.6100 +[2025-09-05 18:27:31] [Rank 0] Group 11 FTA: 0.2000 +[2025-09-05 18:27:31] [Rank 0] Group 11 FTA: 0.2000 +[2025-09-05 18:27:31] [Rank 0] Group 12 FTA: 0.1200 +[2025-09-05 18:27:31] [Rank 0] Group 12 FTA: 0.1200 +[2025-09-05 18:27:31] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 18:27:31] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 18:27:31] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 18:27:31] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 18:27:31] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 18:27:31] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 18:27:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:27:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:27:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:27:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:27:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:27:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:27:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:27:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:27:33] [Rank 0] step:2001/10000 train_time:99428ms step_avg:49.69ms +[2025-09-05 18:27:33] [Rank 0] step:2001/10000 train_time:99428ms step_avg:49.69ms +[2025-09-05 18:27:34] [Rank 0] step:2021/10000 train_time:100074ms step_avg:49.52ms +[2025-09-05 18:27:34] [Rank 0] step:2021/10000 train_time:100074ms step_avg:49.52ms +[2025-09-05 18:27:34] [Rank 0] step:2041/10000 train_time:100727ms step_avg:49.35ms +[2025-09-05 18:27:34] [Rank 0] step:2041/10000 train_time:100727ms step_avg:49.35ms +[2025-09-05 18:27:35] [Rank 0] step:2061/10000 train_time:101380ms step_avg:49.19ms +[2025-09-05 18:27:35] [Rank 0] step:2061/10000 train_time:101380ms 
step_avg:49.19ms +[2025-09-05 18:27:36] [Rank 0] step:2081/10000 train_time:102032ms step_avg:49.03ms +[2025-09-05 18:27:36] [Rank 0] step:2081/10000 train_time:102032ms step_avg:49.03ms +[2025-09-05 18:27:36] [Rank 0] step:2101/10000 train_time:102685ms step_avg:48.87ms +[2025-09-05 18:27:36] [Rank 0] step:2101/10000 train_time:102685ms step_avg:48.87ms +[2025-09-05 18:27:37] [Rank 0] step:2121/10000 train_time:103338ms step_avg:48.72ms +[2025-09-05 18:27:37] [Rank 0] step:2121/10000 train_time:103338ms step_avg:48.72ms +[2025-09-05 18:27:38] [Rank 0] step:2141/10000 train_time:103992ms step_avg:48.57ms +[2025-09-05 18:27:38] [Rank 0] step:2141/10000 train_time:103992ms step_avg:48.57ms +[2025-09-05 18:27:38] [Rank 0] step:2161/10000 train_time:104643ms step_avg:48.42ms +[2025-09-05 18:27:38] [Rank 0] step:2161/10000 train_time:104643ms step_avg:48.42ms +[2025-09-05 18:27:39] [Rank 0] step:2181/10000 train_time:105296ms step_avg:48.28ms +[2025-09-05 18:27:39] [Rank 0] step:2181/10000 train_time:105296ms step_avg:48.28ms +[2025-09-05 18:27:40] [Rank 0] step:2201/10000 train_time:105949ms step_avg:48.14ms +[2025-09-05 18:27:40] [Rank 0] step:2201/10000 train_time:105949ms step_avg:48.14ms +[2025-09-05 18:27:40] [Rank 0] step:2221/10000 train_time:106602ms step_avg:48.00ms +[2025-09-05 18:27:40] [Rank 0] step:2221/10000 train_time:106602ms step_avg:48.00ms +[2025-09-05 18:27:41] [Rank 0] step:2241/10000 train_time:107258ms step_avg:47.86ms +[2025-09-05 18:27:41] [Rank 0] step:2241/10000 train_time:107258ms step_avg:47.86ms +[2025-09-05 18:27:42] [Rank 0] step:2261/10000 train_time:107916ms step_avg:47.73ms +[2025-09-05 18:27:42] [Rank 0] step:2261/10000 train_time:107916ms step_avg:47.73ms +[2025-09-05 18:27:42] [Rank 0] step:2281/10000 train_time:108575ms step_avg:47.60ms +[2025-09-05 18:27:42] [Rank 0] step:2281/10000 train_time:108575ms step_avg:47.60ms +[2025-09-05 18:27:43] [Rank 0] step:2301/10000 train_time:109234ms step_avg:47.47ms +[2025-09-05 18:27:43] [Rank 0] step:2301/10000 train_time:109234ms step_avg:47.47ms +[2025-09-05 18:27:44] [Rank 0] step:2321/10000 train_time:109893ms step_avg:47.35ms +[2025-09-05 18:27:44] [Rank 0] step:2321/10000 train_time:109893ms step_avg:47.35ms +[2025-09-05 18:27:44] [Rank 0] step:2341/10000 train_time:110552ms step_avg:47.22ms +[2025-09-05 18:27:44] [Rank 0] step:2341/10000 train_time:110552ms step_avg:47.22ms +[2025-09-05 18:27:45] [Rank 0] step:2361/10000 train_time:111210ms step_avg:47.10ms +[2025-09-05 18:27:45] [Rank 0] step:2361/10000 train_time:111210ms step_avg:47.10ms +[2025-09-05 18:27:46] [Rank 0] step:2381/10000 train_time:111869ms step_avg:46.98ms +[2025-09-05 18:27:46] [Rank 0] step:2381/10000 train_time:111869ms step_avg:46.98ms +[2025-09-05 18:27:46] [Rank 0] step:2401/10000 train_time:112527ms step_avg:46.87ms +[2025-09-05 18:27:46] [Rank 0] step:2401/10000 train_time:112527ms step_avg:46.87ms +[2025-09-05 18:27:47] [Rank 0] step:2421/10000 train_time:113186ms step_avg:46.75ms +[2025-09-05 18:27:47] [Rank 0] step:2421/10000 train_time:113186ms step_avg:46.75ms +[2025-09-05 18:27:47] [Rank 0] step:2441/10000 train_time:113846ms step_avg:46.64ms +[2025-09-05 18:27:47] [Rank 0] step:2441/10000 train_time:113846ms step_avg:46.64ms +[2025-09-05 18:27:48] [Rank 0] step:2461/10000 train_time:114504ms step_avg:46.53ms +[2025-09-05 18:27:48] [Rank 0] step:2461/10000 train_time:114504ms step_avg:46.53ms +[2025-09-05 18:27:49] [Rank 0] step:2481/10000 train_time:115163ms step_avg:46.42ms +[2025-09-05 18:27:49] [Rank 0] step:2481/10000 
train_time:115163ms step_avg:46.42ms +[2025-09-05 18:27:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:27:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:27:50] [Rank 0] PRINT: step:2500/10000 train_loss:0.8048 val_loss:0.7730 train_time:116057ms step_avg:46.42ms +[2025-09-05 18:27:50] [Rank 0] PRINT: step:2500/10000 train_loss:0.8048 val_loss:0.7730 train_time:116057ms step_avg:46.42ms +[2025-09-05 18:27:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:27:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:27:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:27:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:29:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:29:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:29:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:29:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:29:12] [Rank 0] Total Loss: 5.1250 +[2025-09-05 18:29:12] [Rank 0] Total Loss: 5.1250 +[2025-09-05 18:29:12] [Rank 0] Total FTA (Unweighted): 0.7262 +[2025-09-05 18:29:12] [Rank 0] Total FTA (Unweighted): 0.7262 +[2025-09-05 18:29:12] [Rank 0] Total FTA (Weighted): 0.7262 +[2025-09-05 18:29:12] [Rank 0] Total FTA (Weighted): 0.7262 +[2025-09-05 18:29:12] [Rank 0] Group 0 Loss: 5.0487 +[2025-09-05 18:29:12] [Rank 0] Group 0 Loss: 5.0487 +[2025-09-05 18:29:12] [Rank 0] Group 1 Loss: 4.5731 +[2025-09-05 18:29:12] [Rank 0] Group 1 Loss: 4.5731 +[2025-09-05 18:29:12] [Rank 0] Group 2 Loss: 4.5233 +[2025-09-05 18:29:12] [Rank 0] Group 2 Loss: 4.5233 +[2025-09-05 18:29:12] [Rank 0] Group 3 Loss: 5.0048 +[2025-09-05 18:29:12] [Rank 0] Group 3 Loss: 5.0048 +[2025-09-05 18:29:12] [Rank 0] Group 4 Loss: 5.0431 +[2025-09-05 18:29:12] [Rank 0] Group 4 Loss: 5.0431 +[2025-09-05 18:29:12] [Rank 0] Group 5 Loss: 4.9308 +[2025-09-05 18:29:12] [Rank 0] Group 5 Loss: 4.9308 +[2025-09-05 18:29:12] [Rank 0] Group 6 Loss: 4.8877 +[2025-09-05 18:29:12] [Rank 0] Group 6 Loss: 4.8877 +[2025-09-05 18:29:12] [Rank 0] Group 7 Loss: 5.0105 +[2025-09-05 18:29:12] [Rank 0] Group 7 Loss: 5.0105 +[2025-09-05 18:29:12] [Rank 0] Group 8 Loss: 5.1129 +[2025-09-05 18:29:12] [Rank 0] Group 8 Loss: 5.1129 +[2025-09-05 18:29:12] [Rank 0] Group 9 Loss: 5.1020 +[2025-09-05 18:29:12] [Rank 0] Group 9 Loss: 5.1020 +[2025-09-05 18:29:12] [Rank 0] Group 10 Loss: 5.2502 +[2025-09-05 18:29:12] [Rank 0] Group 10 Loss: 5.2502 +[2025-09-05 18:29:12] [Rank 0] Group 11 Loss: 5.2662 +[2025-09-05 18:29:12] [Rank 0] Group 11 Loss: 5.2662 +[2025-09-05 18:29:12] [Rank 0] Group 12 Loss: 5.3977 +[2025-09-05 18:29:12] [Rank 0] Group 12 Loss: 5.3977 +[2025-09-05 18:29:12] [Rank 0] Group 13 Loss: 5.5095 +[2025-09-05 18:29:12] [Rank 0] Group 13 Loss: 5.5095 +[2025-09-05 18:29:12] [Rank 0] Group 14 Loss: 5.5761 +[2025-09-05 18:29:12] [Rank 0] Group 14 Loss: 5.5761 +[2025-09-05 18:29:12] [Rank 0] Group 15 Loss: 5.7632 +[2025-09-05 18:29:12] [Rank 0] Group 15 Loss: 5.7632 +[2025-09-05 18:29:12] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:29:12] 
[Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:29:12] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 18:29:12] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 18:29:12] [Rank 0] Group 9 FTA: 0.9800 +[2025-09-05 18:29:12] [Rank 0] Group 9 FTA: 0.9800 +[2025-09-05 18:29:12] [Rank 0] Group 10 FTA: 0.8300 +[2025-09-05 18:29:12] [Rank 0] Group 10 FTA: 0.8300 +[2025-09-05 18:29:12] [Rank 0] Group 11 FTA: 0.2700 +[2025-09-05 18:29:12] [Rank 0] Group 11 FTA: 0.2700 +[2025-09-05 18:29:12] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-05 18:29:12] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-05 18:29:12] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 18:29:12] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 18:29:12] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 18:29:12] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 18:29:12] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 18:29:12] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 18:29:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:29:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:29:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:29:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:29:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:29:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:29:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:29:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:29:13] [Rank 0] step:2501/10000 train_time:116066ms step_avg:46.41ms +[2025-09-05 18:29:13] [Rank 0] step:2501/10000 train_time:116066ms step_avg:46.41ms +[2025-09-05 18:29:14] [Rank 0] step:2521/10000 train_time:116500ms step_avg:46.21ms +[2025-09-05 18:29:14] [Rank 0] step:2521/10000 train_time:116500ms step_avg:46.21ms +[2025-09-05 18:29:15] [Rank 0] step:2541/10000 train_time:117158ms step_avg:46.11ms +[2025-09-05 18:29:15] [Rank 0] step:2541/10000 train_time:117158ms step_avg:46.11ms +[2025-09-05 18:29:15] [Rank 0] step:2561/10000 train_time:117817ms step_avg:46.00ms +[2025-09-05 18:29:15] [Rank 0] step:2561/10000 train_time:117817ms step_avg:46.00ms +[2025-09-05 18:29:16] [Rank 0] step:2581/10000 train_time:118475ms step_avg:45.90ms 
+[2025-09-05 18:29:16] [Rank 0] step:2581/10000 train_time:118475ms step_avg:45.90ms +[2025-09-05 18:29:17] [Rank 0] step:2601/10000 train_time:119133ms step_avg:45.80ms +[2025-09-05 18:29:17] [Rank 0] step:2601/10000 train_time:119133ms step_avg:45.80ms +[2025-09-05 18:29:17] [Rank 0] step:2621/10000 train_time:119792ms step_avg:45.70ms +[2025-09-05 18:29:17] [Rank 0] step:2621/10000 train_time:119792ms step_avg:45.70ms +[2025-09-05 18:29:18] [Rank 0] step:2641/10000 train_time:120450ms step_avg:45.61ms +[2025-09-05 18:29:18] [Rank 0] step:2641/10000 train_time:120450ms step_avg:45.61ms +[2025-09-05 18:29:19] [Rank 0] step:2661/10000 train_time:121108ms step_avg:45.51ms +[2025-09-05 18:29:19] [Rank 0] step:2661/10000 train_time:121108ms step_avg:45.51ms +[2025-09-05 18:29:19] [Rank 0] step:2681/10000 train_time:121766ms step_avg:45.42ms +[2025-09-05 18:29:19] [Rank 0] step:2681/10000 train_time:121766ms step_avg:45.42ms +[2025-09-05 18:29:20] [Rank 0] step:2701/10000 train_time:122424ms step_avg:45.33ms +[2025-09-05 18:29:20] [Rank 0] step:2701/10000 train_time:122424ms step_avg:45.33ms +[2025-09-05 18:29:21] [Rank 0] step:2721/10000 train_time:123082ms step_avg:45.23ms +[2025-09-05 18:29:21] [Rank 0] step:2721/10000 train_time:123082ms step_avg:45.23ms +[2025-09-05 18:29:21] [Rank 0] step:2741/10000 train_time:123920ms step_avg:45.21ms +[2025-09-05 18:29:21] [Rank 0] step:2741/10000 train_time:123920ms step_avg:45.21ms +[2025-09-05 18:29:22] [Rank 0] step:2761/10000 train_time:124577ms step_avg:45.12ms +[2025-09-05 18:29:22] [Rank 0] step:2761/10000 train_time:124577ms step_avg:45.12ms +[2025-09-05 18:29:23] [Rank 0] step:2781/10000 train_time:125235ms step_avg:45.03ms +[2025-09-05 18:29:23] [Rank 0] step:2781/10000 train_time:125235ms step_avg:45.03ms +[2025-09-05 18:29:23] [Rank 0] step:2801/10000 train_time:125893ms step_avg:44.95ms +[2025-09-05 18:29:23] [Rank 0] step:2801/10000 train_time:125893ms step_avg:44.95ms +[2025-09-05 18:29:25] [Rank 0] step:2821/10000 train_time:126748ms step_avg:44.93ms +[2025-09-05 18:29:25] [Rank 0] step:2821/10000 train_time:126748ms step_avg:44.93ms +[2025-09-05 18:29:25] [Rank 0] step:2841/10000 train_time:127877ms step_avg:45.01ms +[2025-09-05 18:29:25] [Rank 0] step:2841/10000 train_time:127877ms step_avg:45.01ms +[2025-09-05 18:29:26] [Rank 0] step:2861/10000 train_time:128536ms step_avg:44.93ms +[2025-09-05 18:29:26] [Rank 0] step:2861/10000 train_time:128536ms step_avg:44.93ms +[2025-09-05 18:29:27] [Rank 0] step:2881/10000 train_time:129194ms step_avg:44.84ms +[2025-09-05 18:29:27] [Rank 0] step:2881/10000 train_time:129194ms step_avg:44.84ms +[2025-09-05 18:29:27] [Rank 0] step:2901/10000 train_time:129852ms step_avg:44.76ms +[2025-09-05 18:29:27] [Rank 0] step:2901/10000 train_time:129852ms step_avg:44.76ms +[2025-09-05 18:29:28] [Rank 0] step:2921/10000 train_time:130511ms step_avg:44.68ms +[2025-09-05 18:29:28] [Rank 0] step:2921/10000 train_time:130511ms step_avg:44.68ms +[2025-09-05 18:29:29] [Rank 0] step:2941/10000 train_time:131170ms step_avg:44.60ms +[2025-09-05 18:29:29] [Rank 0] step:2941/10000 train_time:131170ms step_avg:44.60ms +[2025-09-05 18:29:29] [Rank 0] step:2961/10000 train_time:131828ms step_avg:44.52ms +[2025-09-05 18:29:29] [Rank 0] step:2961/10000 train_time:131828ms step_avg:44.52ms +[2025-09-05 18:29:30] [Rank 0] step:2981/10000 train_time:132486ms step_avg:44.44ms +[2025-09-05 18:29:30] [Rank 0] step:2981/10000 train_time:132486ms step_avg:44.44ms +[2025-09-05 18:29:31] [Rank 0] PRINT: Warning: val_tokens (491520) 
not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:29:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:29:31] [Rank 0] PRINT: step:3000/10000 train_loss:0.7716 val_loss:0.7492 train_time:133379ms step_avg:44.46ms +[2025-09-05 18:29:31] [Rank 0] PRINT: step:3000/10000 train_loss:0.7716 val_loss:0.7492 train_time:133379ms step_avg:44.46ms +[2025-09-05 18:29:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:29:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:29:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:29:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:30:53] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:30:53] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:30:53] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:30:53] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:30:53] [Rank 0] Total Loss: 5.1504 +[2025-09-05 18:30:53] [Rank 0] Total Loss: 5.1504 +[2025-09-05 18:30:53] [Rank 0] Total FTA (Unweighted): 0.7506 +[2025-09-05 18:30:53] [Rank 0] Total FTA (Unweighted): 0.7506 +[2025-09-05 18:30:53] [Rank 0] Total FTA (Weighted): 0.7506 +[2025-09-05 18:30:53] [Rank 0] Total FTA (Weighted): 0.7506 +[2025-09-05 18:30:53] [Rank 0] Group 0 Loss: 5.0505 +[2025-09-05 18:30:53] [Rank 0] Group 0 Loss: 5.0505 +[2025-09-05 18:30:53] [Rank 0] Group 1 Loss: 4.7644 +[2025-09-05 18:30:53] [Rank 0] Group 1 Loss: 4.7644 +[2025-09-05 18:30:53] [Rank 0] Group 2 Loss: 4.6133 +[2025-09-05 18:30:53] [Rank 0] Group 2 Loss: 4.6133 +[2025-09-05 18:30:53] [Rank 0] Group 3 Loss: 5.0203 +[2025-09-05 18:30:53] [Rank 0] Group 3 Loss: 5.0203 +[2025-09-05 18:30:53] [Rank 0] Group 4 Loss: 5.0339 +[2025-09-05 18:30:53] [Rank 0] Group 4 Loss: 5.0339 +[2025-09-05 18:30:53] [Rank 0] Group 5 Loss: 5.0033 +[2025-09-05 18:30:53] [Rank 0] Group 5 Loss: 5.0033 +[2025-09-05 18:30:53] [Rank 0] Group 6 Loss: 4.9165 +[2025-09-05 18:30:53] [Rank 0] Group 6 Loss: 4.9165 +[2025-09-05 18:30:53] [Rank 0] Group 7 Loss: 5.0561 +[2025-09-05 18:30:53] [Rank 0] Group 7 Loss: 5.0561 +[2025-09-05 18:30:53] [Rank 0] Group 8 Loss: 5.1442 +[2025-09-05 18:30:53] [Rank 0] Group 8 Loss: 5.1442 +[2025-09-05 18:30:53] [Rank 0] Group 9 Loss: 5.1217 +[2025-09-05 18:30:53] [Rank 0] Group 9 Loss: 5.1217 +[2025-09-05 18:30:53] [Rank 0] Group 10 Loss: 5.2706 +[2025-09-05 18:30:53] [Rank 0] Group 10 Loss: 5.2706 +[2025-09-05 18:30:53] [Rank 0] Group 11 Loss: 5.2907 +[2025-09-05 18:30:53] [Rank 0] Group 11 Loss: 5.2907 +[2025-09-05 18:30:53] [Rank 0] Group 12 Loss: 5.4264 +[2025-09-05 18:30:53] [Rank 0] Group 12 Loss: 5.4264 +[2025-09-05 18:30:53] [Rank 0] Group 13 Loss: 5.4734 +[2025-09-05 18:30:53] [Rank 0] Group 13 Loss: 5.4734 +[2025-09-05 18:30:53] [Rank 0] Group 14 Loss: 5.5263 +[2025-09-05 18:30:53] [Rank 0] Group 14 Loss: 5.5263 +[2025-09-05 18:30:53] [Rank 0] Group 15 Loss: 5.6952 +[2025-09-05 18:30:53] [Rank 0] Group 15 Loss: 5.6952 +[2025-09-05 18:30:53] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 
0] Group 3 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 4 FTA: 0.9900 +[2025-09-05 18:30:53] [Rank 0] Group 4 FTA: 0.9900 +[2025-09-05 18:30:53] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 18:30:53] [Rank 0] Group 9 FTA: 0.9700 +[2025-09-05 18:30:53] [Rank 0] Group 9 FTA: 0.9700 +[2025-09-05 18:30:53] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 18:30:53] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 18:30:53] [Rank 0] Group 11 FTA: 0.5000 +[2025-09-05 18:30:53] [Rank 0] Group 11 FTA: 0.5000 +[2025-09-05 18:30:53] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 18:30:53] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 18:30:53] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 18:30:53] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 18:30:53] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 18:30:53] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 18:30:53] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 18:30:53] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 18:30:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:30:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:30:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:30:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:30:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:30:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:30:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:30:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:30:55] [Rank 0] step:3001/10000 train_time:133389ms step_avg:44.45ms +[2025-09-05 18:30:55] [Rank 0] step:3001/10000 train_time:133389ms step_avg:44.45ms +[2025-09-05 18:30:55] [Rank 0] step:3021/10000 train_time:133839ms step_avg:44.30ms +[2025-09-05 18:30:55] [Rank 0] step:3021/10000 train_time:133839ms step_avg:44.30ms +[2025-09-05 18:30:56] [Rank 0] step:3041/10000 train_time:134498ms step_avg:44.23ms +[2025-09-05 18:30:56] [Rank 0] step:3041/10000 train_time:134498ms step_avg:44.23ms +[2025-09-05 18:30:57] [Rank 0] step:3061/10000 train_time:135157ms step_avg:44.15ms +[2025-09-05 18:30:57] [Rank 0] step:3061/10000 train_time:135157ms step_avg:44.15ms +[2025-09-05 18:30:57] [Rank 0] step:3081/10000 train_time:135816ms step_avg:44.08ms +[2025-09-05 18:30:57] [Rank 0] step:3081/10000 train_time:135816ms step_avg:44.08ms +[2025-09-05 18:30:58] [Rank 
0] step:3101/10000 train_time:136475ms step_avg:44.01ms +[2025-09-05 18:30:58] [Rank 0] step:3101/10000 train_time:136475ms step_avg:44.01ms +[2025-09-05 18:30:59] [Rank 0] step:3121/10000 train_time:137135ms step_avg:43.94ms +[2025-09-05 18:30:59] [Rank 0] step:3121/10000 train_time:137135ms step_avg:43.94ms +[2025-09-05 18:30:59] [Rank 0] step:3141/10000 train_time:137795ms step_avg:43.87ms +[2025-09-05 18:30:59] [Rank 0] step:3141/10000 train_time:137795ms step_avg:43.87ms +[2025-09-05 18:31:00] [Rank 0] step:3161/10000 train_time:138455ms step_avg:43.80ms +[2025-09-05 18:31:00] [Rank 0] step:3161/10000 train_time:138455ms step_avg:43.80ms +[2025-09-05 18:31:00] [Rank 0] step:3181/10000 train_time:139114ms step_avg:43.73ms +[2025-09-05 18:31:00] [Rank 0] step:3181/10000 train_time:139114ms step_avg:43.73ms +[2025-09-05 18:31:01] [Rank 0] step:3201/10000 train_time:139775ms step_avg:43.67ms +[2025-09-05 18:31:01] [Rank 0] step:3201/10000 train_time:139775ms step_avg:43.67ms +[2025-09-05 18:31:02] [Rank 0] step:3221/10000 train_time:140432ms step_avg:43.60ms +[2025-09-05 18:31:02] [Rank 0] step:3221/10000 train_time:140432ms step_avg:43.60ms +[2025-09-05 18:31:02] [Rank 0] step:3241/10000 train_time:141091ms step_avg:43.53ms +[2025-09-05 18:31:02] [Rank 0] step:3241/10000 train_time:141091ms step_avg:43.53ms +[2025-09-05 18:31:03] [Rank 0] step:3261/10000 train_time:141750ms step_avg:43.47ms +[2025-09-05 18:31:03] [Rank 0] step:3261/10000 train_time:141750ms step_avg:43.47ms +[2025-09-05 18:31:04] [Rank 0] step:3281/10000 train_time:142409ms step_avg:43.40ms +[2025-09-05 18:31:04] [Rank 0] step:3281/10000 train_time:142409ms step_avg:43.40ms +[2025-09-05 18:31:04] [Rank 0] step:3301/10000 train_time:143069ms step_avg:43.34ms +[2025-09-05 18:31:04] [Rank 0] step:3301/10000 train_time:143069ms step_avg:43.34ms +[2025-09-05 18:31:05] [Rank 0] step:3321/10000 train_time:143728ms step_avg:43.28ms +[2025-09-05 18:31:05] [Rank 0] step:3321/10000 train_time:143728ms step_avg:43.28ms +[2025-09-05 18:31:06] [Rank 0] step:3341/10000 train_time:144387ms step_avg:43.22ms +[2025-09-05 18:31:06] [Rank 0] step:3341/10000 train_time:144387ms step_avg:43.22ms +[2025-09-05 18:31:06] [Rank 0] step:3361/10000 train_time:145047ms step_avg:43.16ms +[2025-09-05 18:31:06] [Rank 0] step:3361/10000 train_time:145047ms step_avg:43.16ms +[2025-09-05 18:31:07] [Rank 0] step:3381/10000 train_time:145706ms step_avg:43.10ms +[2025-09-05 18:31:07] [Rank 0] step:3381/10000 train_time:145706ms step_avg:43.10ms +[2025-09-05 18:31:08] [Rank 0] step:3401/10000 train_time:146365ms step_avg:43.04ms +[2025-09-05 18:31:08] [Rank 0] step:3401/10000 train_time:146365ms step_avg:43.04ms +[2025-09-05 18:31:08] [Rank 0] step:3421/10000 train_time:147024ms step_avg:42.98ms +[2025-09-05 18:31:08] [Rank 0] step:3421/10000 train_time:147024ms step_avg:42.98ms +[2025-09-05 18:31:09] [Rank 0] step:3441/10000 train_time:147682ms step_avg:42.92ms +[2025-09-05 18:31:09] [Rank 0] step:3441/10000 train_time:147682ms step_avg:42.92ms +[2025-09-05 18:31:10] [Rank 0] step:3461/10000 train_time:148341ms step_avg:42.86ms +[2025-09-05 18:31:10] [Rank 0] step:3461/10000 train_time:148341ms step_avg:42.86ms +[2025-09-05 18:31:10] [Rank 0] step:3481/10000 train_time:148999ms step_avg:42.80ms +[2025-09-05 18:31:10] [Rank 0] step:3481/10000 train_time:148999ms step_avg:42.80ms +[2025-09-05 18:31:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 18:31:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:31:11] [Rank 0] PRINT: step:3500/10000 train_loss:0.7473 val_loss:0.7289 train_time:149894ms step_avg:42.83ms +[2025-09-05 18:31:11] [Rank 0] PRINT: step:3500/10000 train_loss:0.7473 val_loss:0.7289 train_time:149894ms step_avg:42.83ms +[2025-09-05 18:31:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:31:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:31:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:31:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:32:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:32:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:32:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:32:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:32:33] [Rank 0] Total Loss: 5.1199 +[2025-09-05 18:32:33] [Rank 0] Total Loss: 5.1199 +[2025-09-05 18:32:33] [Rank 0] Total FTA (Unweighted): 0.7687 +[2025-09-05 18:32:33] [Rank 0] Total FTA (Unweighted): 0.7687 +[2025-09-05 18:32:33] [Rank 0] Total FTA (Weighted): 0.7688 +[2025-09-05 18:32:33] [Rank 0] Total FTA (Weighted): 0.7688 +[2025-09-05 18:32:33] [Rank 0] Group 0 Loss: 4.9255 +[2025-09-05 18:32:33] [Rank 0] Group 0 Loss: 4.9255 +[2025-09-05 18:32:33] [Rank 0] Group 1 Loss: 4.7568 +[2025-09-05 18:32:33] [Rank 0] Group 1 Loss: 4.7568 +[2025-09-05 18:32:33] [Rank 0] Group 2 Loss: 4.6562 +[2025-09-05 18:32:33] [Rank 0] Group 2 Loss: 4.6562 +[2025-09-05 18:32:33] [Rank 0] Group 3 Loss: 5.0199 +[2025-09-05 18:32:33] [Rank 0] Group 3 Loss: 5.0199 +[2025-09-05 18:32:33] [Rank 0] Group 4 Loss: 5.0376 +[2025-09-05 18:32:33] [Rank 0] Group 4 Loss: 5.0376 +[2025-09-05 18:32:33] [Rank 0] Group 5 Loss: 4.9590 +[2025-09-05 18:32:33] [Rank 0] Group 5 Loss: 4.9590 +[2025-09-05 18:32:33] [Rank 0] Group 6 Loss: 4.8953 +[2025-09-05 18:32:33] [Rank 0] Group 6 Loss: 4.8953 +[2025-09-05 18:32:33] [Rank 0] Group 7 Loss: 5.0394 +[2025-09-05 18:32:33] [Rank 0] Group 7 Loss: 5.0394 +[2025-09-05 18:32:33] [Rank 0] Group 8 Loss: 5.1431 +[2025-09-05 18:32:33] [Rank 0] Group 8 Loss: 5.1431 +[2025-09-05 18:32:33] [Rank 0] Group 9 Loss: 5.0811 +[2025-09-05 18:32:33] [Rank 0] Group 9 Loss: 5.0811 +[2025-09-05 18:32:33] [Rank 0] Group 10 Loss: 5.2684 +[2025-09-05 18:32:33] [Rank 0] Group 10 Loss: 5.2684 +[2025-09-05 18:32:33] [Rank 0] Group 11 Loss: 5.2521 +[2025-09-05 18:32:33] [Rank 0] Group 11 Loss: 5.2521 +[2025-09-05 18:32:33] [Rank 0] Group 12 Loss: 5.3732 +[2025-09-05 18:32:33] [Rank 0] Group 12 Loss: 5.3732 +[2025-09-05 18:32:33] [Rank 0] Group 13 Loss: 5.4188 +[2025-09-05 18:32:33] [Rank 0] Group 13 Loss: 5.4188 +[2025-09-05 18:32:33] [Rank 0] Group 14 Loss: 5.4639 +[2025-09-05 18:32:33] [Rank 0] Group 14 Loss: 5.4639 +[2025-09-05 18:32:33] [Rank 0] Group 15 Loss: 5.6285 +[2025-09-05 18:32:33] [Rank 0] Group 15 Loss: 5.6285 +[2025-09-05 18:32:33] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 18:32:33] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 18:32:33] [Rank 0] Group 9 FTA: 0.9900 +[2025-09-05 18:32:33] [Rank 0] Group 9 FTA: 0.9900 +[2025-09-05 18:32:33] [Rank 0] Group 10 FTA: 0.9600 +[2025-09-05 18:32:33] [Rank 0] Group 10 FTA: 0.9600 +[2025-09-05 18:32:33] [Rank 0] Group 11 FTA: 0.6800 +[2025-09-05 18:32:33] [Rank 0] Group 11 FTA: 0.6800 +[2025-09-05 18:32:33] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 18:32:33] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 18:32:33] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 18:32:33] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 18:32:33] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 18:32:33] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 18:32:33] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 18:32:33] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 18:32:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:32:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:32:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:32:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:32:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:32:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:32:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:32:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:32:34] [Rank 0] step:3501/10000 train_time:149902ms step_avg:42.82ms +[2025-09-05 18:32:34] [Rank 0] step:3501/10000 train_time:149902ms step_avg:42.82ms +[2025-09-05 18:32:35] [Rank 0] step:3521/10000 train_time:150347ms step_avg:42.70ms +[2025-09-05 18:32:35] [Rank 0] step:3521/10000 train_time:150347ms step_avg:42.70ms +[2025-09-05 18:32:35] [Rank 0] step:3541/10000 train_time:151005ms step_avg:42.64ms +[2025-09-05 18:32:35] [Rank 0] step:3541/10000 train_time:151005ms step_avg:42.64ms +[2025-09-05 18:32:36] [Rank 0] step:3561/10000 train_time:151663ms step_avg:42.59ms +[2025-09-05 18:32:36] [Rank 0] step:3561/10000 train_time:151663ms step_avg:42.59ms +[2025-09-05 18:32:37] [Rank 0] step:3581/10000 train_time:152321ms step_avg:42.54ms +[2025-09-05 18:32:37] [Rank 0] step:3581/10000 train_time:152321ms step_avg:42.54ms +[2025-09-05 18:32:37] [Rank 0] step:3601/10000 train_time:152980ms step_avg:42.48ms +[2025-09-05 
18:32:37] [Rank 0] step:3601/10000 train_time:152980ms step_avg:42.48ms +[2025-09-05 18:32:38] [Rank 0] step:3621/10000 train_time:153638ms step_avg:42.43ms +[2025-09-05 18:32:38] [Rank 0] step:3621/10000 train_time:153638ms step_avg:42.43ms +[2025-09-05 18:32:39] [Rank 0] step:3641/10000 train_time:154369ms step_avg:42.40ms +[2025-09-05 18:32:39] [Rank 0] step:3641/10000 train_time:154369ms step_avg:42.40ms +[2025-09-05 18:32:39] [Rank 0] step:3661/10000 train_time:155026ms step_avg:42.35ms +[2025-09-05 18:32:39] [Rank 0] step:3661/10000 train_time:155026ms step_avg:42.35ms +[2025-09-05 18:32:40] [Rank 0] step:3681/10000 train_time:155683ms step_avg:42.29ms +[2025-09-05 18:32:40] [Rank 0] step:3681/10000 train_time:155683ms step_avg:42.29ms +[2025-09-05 18:32:41] [Rank 0] step:3701/10000 train_time:156341ms step_avg:42.24ms +[2025-09-05 18:32:41] [Rank 0] step:3701/10000 train_time:156341ms step_avg:42.24ms +[2025-09-05 18:32:41] [Rank 0] step:3721/10000 train_time:156999ms step_avg:42.19ms +[2025-09-05 18:32:41] [Rank 0] step:3721/10000 train_time:156999ms step_avg:42.19ms +[2025-09-05 18:32:42] [Rank 0] step:3741/10000 train_time:157658ms step_avg:42.14ms +[2025-09-05 18:32:42] [Rank 0] step:3741/10000 train_time:157658ms step_avg:42.14ms +[2025-09-05 18:32:43] [Rank 0] step:3761/10000 train_time:158315ms step_avg:42.09ms +[2025-09-05 18:32:43] [Rank 0] step:3761/10000 train_time:158315ms step_avg:42.09ms +[2025-09-05 18:32:43] [Rank 0] step:3781/10000 train_time:158974ms step_avg:42.05ms +[2025-09-05 18:32:43] [Rank 0] step:3781/10000 train_time:158974ms step_avg:42.05ms +[2025-09-05 18:32:44] [Rank 0] step:3801/10000 train_time:159632ms step_avg:42.00ms +[2025-09-05 18:32:44] [Rank 0] step:3801/10000 train_time:159632ms step_avg:42.00ms +[2025-09-05 18:32:45] [Rank 0] step:3821/10000 train_time:160290ms step_avg:41.95ms +[2025-09-05 18:32:45] [Rank 0] step:3821/10000 train_time:160290ms step_avg:41.95ms +[2025-09-05 18:32:45] [Rank 0] step:3841/10000 train_time:160948ms step_avg:41.90ms +[2025-09-05 18:32:45] [Rank 0] step:3841/10000 train_time:160948ms step_avg:41.90ms +[2025-09-05 18:32:46] [Rank 0] step:3861/10000 train_time:161606ms step_avg:41.86ms +[2025-09-05 18:32:46] [Rank 0] step:3861/10000 train_time:161606ms step_avg:41.86ms +[2025-09-05 18:32:47] [Rank 0] step:3881/10000 train_time:162265ms step_avg:41.81ms +[2025-09-05 18:32:47] [Rank 0] step:3881/10000 train_time:162265ms step_avg:41.81ms +[2025-09-05 18:32:47] [Rank 0] step:3901/10000 train_time:162922ms step_avg:41.76ms +[2025-09-05 18:32:47] [Rank 0] step:3901/10000 train_time:162922ms step_avg:41.76ms +[2025-09-05 18:32:48] [Rank 0] step:3921/10000 train_time:163581ms step_avg:41.72ms +[2025-09-05 18:32:48] [Rank 0] step:3921/10000 train_time:163581ms step_avg:41.72ms +[2025-09-05 18:32:49] [Rank 0] step:3941/10000 train_time:164239ms step_avg:41.67ms +[2025-09-05 18:32:49] [Rank 0] step:3941/10000 train_time:164239ms step_avg:41.67ms +[2025-09-05 18:32:49] [Rank 0] step:3961/10000 train_time:164898ms step_avg:41.63ms +[2025-09-05 18:32:49] [Rank 0] step:3961/10000 train_time:164898ms step_avg:41.63ms +[2025-09-05 18:32:50] [Rank 0] step:3981/10000 train_time:165555ms step_avg:41.59ms +[2025-09-05 18:32:50] [Rank 0] step:3981/10000 train_time:165555ms step_avg:41.59ms +[2025-09-05 18:32:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 18:32:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:32:51] [Rank 0] PRINT: step:4000/10000 train_loss:0.7293 val_loss:0.7131 train_time:166448ms step_avg:41.61ms +[2025-09-05 18:32:51] [Rank 0] PRINT: step:4000/10000 train_loss:0.7293 val_loss:0.7131 train_time:166448ms step_avg:41.61ms +[2025-09-05 18:32:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:32:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:32:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:32:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:34:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:34:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:34:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:34:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:34:13] [Rank 0] Total Loss: 5.0714 +[2025-09-05 18:34:13] [Rank 0] Total Loss: 5.0714 +[2025-09-05 18:34:13] [Rank 0] Total FTA (Unweighted): 0.7944 +[2025-09-05 18:34:13] [Rank 0] Total FTA (Unweighted): 0.7944 +[2025-09-05 18:34:13] [Rank 0] Total FTA (Weighted): 0.7944 +[2025-09-05 18:34:13] [Rank 0] Total FTA (Weighted): 0.7944 +[2025-09-05 18:34:13] [Rank 0] Group 0 Loss: 4.8888 +[2025-09-05 18:34:13] [Rank 0] Group 0 Loss: 4.8888 +[2025-09-05 18:34:13] [Rank 0] Group 1 Loss: 4.5964 +[2025-09-05 18:34:13] [Rank 0] Group 1 Loss: 4.5964 +[2025-09-05 18:34:13] [Rank 0] Group 2 Loss: 4.6168 +[2025-09-05 18:34:13] [Rank 0] Group 2 Loss: 4.6168 +[2025-09-05 18:34:13] [Rank 0] Group 3 Loss: 4.9619 +[2025-09-05 18:34:13] [Rank 0] Group 3 Loss: 4.9619 +[2025-09-05 18:34:13] [Rank 0] Group 4 Loss: 5.0243 +[2025-09-05 18:34:13] [Rank 0] Group 4 Loss: 5.0243 +[2025-09-05 18:34:13] [Rank 0] Group 5 Loss: 4.9678 +[2025-09-05 18:34:13] [Rank 0] Group 5 Loss: 4.9678 +[2025-09-05 18:34:13] [Rank 0] Group 6 Loss: 4.8543 +[2025-09-05 18:34:13] [Rank 0] Group 6 Loss: 4.8543 +[2025-09-05 18:34:13] [Rank 0] Group 7 Loss: 5.0107 +[2025-09-05 18:34:13] [Rank 0] Group 7 Loss: 5.0107 +[2025-09-05 18:34:13] [Rank 0] Group 8 Loss: 5.1024 +[2025-09-05 18:34:13] [Rank 0] Group 8 Loss: 5.1024 +[2025-09-05 18:34:13] [Rank 0] Group 9 Loss: 5.0863 +[2025-09-05 18:34:13] [Rank 0] Group 9 Loss: 5.0863 +[2025-09-05 18:34:13] [Rank 0] Group 10 Loss: 5.1935 +[2025-09-05 18:34:13] [Rank 0] Group 10 Loss: 5.1935 +[2025-09-05 18:34:13] [Rank 0] Group 11 Loss: 5.2134 +[2025-09-05 18:34:13] [Rank 0] Group 11 Loss: 5.2134 +[2025-09-05 18:34:13] [Rank 0] Group 12 Loss: 5.3152 +[2025-09-05 18:34:13] [Rank 0] Group 12 Loss: 5.3152 +[2025-09-05 18:34:13] [Rank 0] Group 13 Loss: 5.3833 +[2025-09-05 18:34:13] [Rank 0] Group 13 Loss: 5.3833 +[2025-09-05 18:34:13] [Rank 0] Group 14 Loss: 5.3702 +[2025-09-05 18:34:13] [Rank 0] Group 14 Loss: 5.3702 +[2025-09-05 18:34:13] [Rank 0] Group 15 Loss: 5.5561 +[2025-09-05 18:34:13] [Rank 0] Group 15 Loss: 5.5561 +[2025-09-05 18:34:13] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 4 FTA: 0.9900 
+[2025-09-05 18:34:13] [Rank 0] Group 4 FTA: 0.9900 +[2025-09-05 18:34:13] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 18:34:13] [Rank 0] Group 9 FTA: 0.9900 +[2025-09-05 18:34:13] [Rank 0] Group 9 FTA: 0.9900 +[2025-09-05 18:34:13] [Rank 0] Group 10 FTA: 0.9900 +[2025-09-05 18:34:13] [Rank 0] Group 10 FTA: 0.9900 +[2025-09-05 18:34:13] [Rank 0] Group 11 FTA: 0.8600 +[2025-09-05 18:34:13] [Rank 0] Group 11 FTA: 0.8600 +[2025-09-05 18:34:13] [Rank 0] Group 12 FTA: 0.3900 +[2025-09-05 18:34:13] [Rank 0] Group 12 FTA: 0.3900 +[2025-09-05 18:34:13] [Rank 0] Group 13 FTA: 0.2400 +[2025-09-05 18:34:13] [Rank 0] Group 13 FTA: 0.2400 +[2025-09-05 18:34:13] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 18:34:13] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 18:34:13] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 18:34:13] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 18:34:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:34:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png +[2025-09-05 18:34:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:34:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png +[2025-09-05 18:34:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:34:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png +[2025-09-05 18:34:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:34:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png +[2025-09-05 18:34:14] [Rank 0] step:4001/10000 train_time:166456ms step_avg:41.60ms +[2025-09-05 18:34:14] [Rank 0] step:4001/10000 train_time:166456ms step_avg:41.60ms +[2025-09-05 18:34:15] [Rank 0] step:4021/10000 train_time:167000ms step_avg:41.53ms +[2025-09-05 18:34:15] [Rank 0] step:4021/10000 train_time:167000ms step_avg:41.53ms +[2025-09-05 18:34:16] [Rank 0] step:4041/10000 train_time:167659ms step_avg:41.49ms +[2025-09-05 18:34:16] [Rank 0] step:4041/10000 train_time:167659ms step_avg:41.49ms +[2025-09-05 18:34:16] [Rank 0] step:4061/10000 train_time:168319ms step_avg:41.45ms +[2025-09-05 18:34:16] [Rank 0] step:4061/10000 train_time:168319ms step_avg:41.45ms +[2025-09-05 18:34:17] [Rank 0] step:4081/10000 train_time:168979ms step_avg:41.41ms +[2025-09-05 18:34:17] [Rank 0] step:4081/10000 train_time:168979ms step_avg:41.41ms +[2025-09-05 18:34:17] [Rank 0] step:4101/10000 train_time:169637ms step_avg:41.36ms +[2025-09-05 
18:34:17] [Rank 0] step:4101/10000 train_time:169637ms step_avg:41.36ms +[2025-09-05 18:34:18] [Rank 0] step:4121/10000 train_time:170295ms step_avg:41.32ms +[2025-09-05 18:34:18] [Rank 0] step:4121/10000 train_time:170295ms step_avg:41.32ms +[2025-09-05 18:34:19] [Rank 0] step:4141/10000 train_time:170954ms step_avg:41.28ms +[2025-09-05 18:34:19] [Rank 0] step:4141/10000 train_time:170954ms step_avg:41.28ms +[2025-09-05 18:34:19] [Rank 0] step:4161/10000 train_time:171614ms step_avg:41.24ms +[2025-09-05 18:34:19] [Rank 0] step:4161/10000 train_time:171614ms step_avg:41.24ms +[2025-09-05 18:34:20] [Rank 0] step:4181/10000 train_time:172272ms step_avg:41.20ms +[2025-09-05 18:34:20] [Rank 0] step:4181/10000 train_time:172272ms step_avg:41.20ms +[2025-09-05 18:34:21] [Rank 0] step:4201/10000 train_time:172932ms step_avg:41.16ms +[2025-09-05 18:34:21] [Rank 0] step:4201/10000 train_time:172932ms step_avg:41.16ms +[2025-09-05 18:34:21] [Rank 0] step:4221/10000 train_time:173591ms step_avg:41.13ms +[2025-09-05 18:34:21] [Rank 0] step:4221/10000 train_time:173591ms step_avg:41.13ms +[2025-09-05 18:34:22] [Rank 0] step:4241/10000 train_time:174250ms step_avg:41.09ms +[2025-09-05 18:34:22] [Rank 0] step:4241/10000 train_time:174250ms step_avg:41.09ms +[2025-09-05 18:34:23] [Rank 0] step:4261/10000 train_time:174909ms step_avg:41.05ms +[2025-09-05 18:34:23] [Rank 0] step:4261/10000 train_time:174909ms step_avg:41.05ms +[2025-09-05 18:34:23] [Rank 0] step:4281/10000 train_time:175568ms step_avg:41.01ms +[2025-09-05 18:34:23] [Rank 0] step:4281/10000 train_time:175568ms step_avg:41.01ms +[2025-09-05 18:34:24] [Rank 0] step:4301/10000 train_time:176227ms step_avg:40.97ms +[2025-09-05 18:34:24] [Rank 0] step:4301/10000 train_time:176227ms step_avg:40.97ms +[2025-09-05 18:34:25] [Rank 0] step:4321/10000 train_time:176887ms step_avg:40.94ms +[2025-09-05 18:34:25] [Rank 0] step:4321/10000 train_time:176887ms step_avg:40.94ms +[2025-09-05 18:34:25] [Rank 0] step:4341/10000 train_time:177546ms step_avg:40.90ms +[2025-09-05 18:34:25] [Rank 0] step:4341/10000 train_time:177546ms step_avg:40.90ms +[2025-09-05 18:34:26] [Rank 0] step:4361/10000 train_time:178204ms step_avg:40.86ms +[2025-09-05 18:34:26] [Rank 0] step:4361/10000 train_time:178204ms step_avg:40.86ms +[2025-09-05 18:34:27] [Rank 0] step:4381/10000 train_time:178863ms step_avg:40.83ms +[2025-09-05 18:34:27] [Rank 0] step:4381/10000 train_time:178863ms step_avg:40.83ms +[2025-09-05 18:34:27] [Rank 0] step:4401/10000 train_time:179523ms step_avg:40.79ms +[2025-09-05 18:34:27] [Rank 0] step:4401/10000 train_time:179523ms step_avg:40.79ms +[2025-09-05 18:34:28] [Rank 0] step:4421/10000 train_time:180182ms step_avg:40.76ms +[2025-09-05 18:34:28] [Rank 0] step:4421/10000 train_time:180182ms step_avg:40.76ms +[2025-09-05 18:34:29] [Rank 0] step:4441/10000 train_time:180840ms step_avg:40.72ms +[2025-09-05 18:34:29] [Rank 0] step:4441/10000 train_time:180840ms step_avg:40.72ms +[2025-09-05 18:34:29] [Rank 0] step:4461/10000 train_time:181500ms step_avg:40.69ms +[2025-09-05 18:34:29] [Rank 0] step:4461/10000 train_time:181500ms step_avg:40.69ms +[2025-09-05 18:34:30] [Rank 0] step:4481/10000 train_time:182159ms step_avg:40.65ms +[2025-09-05 18:34:30] [Rank 0] step:4481/10000 train_time:182159ms step_avg:40.65ms +[2025-09-05 18:34:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 18:34:31] [Rank 0] PRINT: step:4500/10000 train_loss:0.7156 val_loss:0.7012 train_time:183053ms step_avg:40.68ms
+[2025-09-05 18:34:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:34:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:35:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:35:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:35:52] [Rank 0] Total Loss: 5.1743
+[2025-09-05 18:35:52] [Rank 0] Total FTA (Unweighted): 0.8119
+[2025-09-05 18:35:52] [Rank 0] Total FTA (Weighted): 0.8119
+[2025-09-05 18:35:52] [Rank 0] Group 0 Loss: 5.0221
+[2025-09-05 18:35:52] [Rank 0] Group 1 Loss: 4.7876
+[2025-09-05 18:35:52] [Rank 0] Group 2 Loss: 4.7466
+[2025-09-05 18:35:52] [Rank 0] Group 3 Loss: 5.1044
+[2025-09-05 18:35:52] [Rank 0] Group 4 Loss: 5.0582
+[2025-09-05 18:35:52] [Rank 0] Group 5 Loss: 5.0474
+[2025-09-05 18:35:52] [Rank 0] Group 6 Loss: 4.9473
+[2025-09-05 18:35:52] [Rank 0] Group 7 Loss: 5.1306
+[2025-09-05 18:35:52] [Rank 0] Group 8 Loss: 5.2377
+[2025-09-05 18:35:52] [Rank 0] Group 9 Loss: 5.1893
+[2025-09-05 18:35:52] [Rank 0] Group 10 Loss: 5.3267
+[2025-09-05 18:35:52] [Rank 0] Group 11 Loss: 5.3514
+[2025-09-05 18:35:52] [Rank 0] Group 12 Loss: 5.3566
+[2025-09-05 18:35:52] [Rank 0] Group 13 Loss: 5.4621
+[2025-09-05 18:35:52] [Rank 0] Group 14 Loss: 5.4329
+[2025-09-05 18:35:52] [Rank 0] Group 15 Loss: 5.5874
+[2025-09-05 18:35:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:35:52] [Rank 0] Group 11 FTA: 0.8900
+[2025-09-05 18:35:52] [Rank 0] Group 12 FTA: 0.5600
+[2025-09-05 18:35:52] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-05 18:35:52] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 18:35:52] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 18:35:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:35:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:35:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:35:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:35:53] [Rank 0] step:4501/10000 train_time:183061ms step_avg:40.67ms
+[2025-09-05 18:35:54] [Rank 0] step:4521/10000 train_time:183512ms step_avg:40.59ms
+[2025-09-05 18:35:55] [Rank 0] step:4541/10000 train_time:184170ms step_avg:40.56ms
+[2025-09-05 18:35:55] [Rank 0] step:4561/10000 train_time:184828ms step_avg:40.52ms
+[2025-09-05 18:35:56] [Rank 0] step:4581/10000 train_time:185486ms step_avg:40.49ms
+[2025-09-05 18:35:57] [Rank 0] step:4601/10000 train_time:186145ms step_avg:40.46ms
+[2025-09-05 18:35:57] [Rank 0] step:4621/10000 train_time:186803ms step_avg:40.42ms
+[2025-09-05 18:35:58] [Rank 0] step:4641/10000 train_time:187461ms step_avg:40.39ms
+[2025-09-05 18:35:59] [Rank 0] step:4661/10000 train_time:188119ms step_avg:40.36ms
+[2025-09-05 18:35:59] [Rank 0] step:4681/10000 train_time:188778ms step_avg:40.33ms
+[2025-09-05 18:36:00] [Rank 0] step:4701/10000 train_time:189436ms step_avg:40.30ms
+[2025-09-05 18:36:01] [Rank 0] step:4721/10000 train_time:190094ms step_avg:40.27ms
+[2025-09-05 18:36:01] [Rank 0] step:4741/10000 train_time:190752ms step_avg:40.23ms
+[2025-09-05 18:36:02] [Rank 0] step:4761/10000 train_time:191410ms step_avg:40.20ms
+[2025-09-05 18:36:03] [Rank 0] step:4781/10000 train_time:192069ms step_avg:40.17ms
+[2025-09-05 18:36:03] [Rank 0] step:4801/10000 train_time:192727ms step_avg:40.14ms
+[2025-09-05 18:36:04] [Rank 0] step:4821/10000 train_time:193385ms step_avg:40.11ms
+[2025-09-05 18:36:05] [Rank 0] step:4841/10000 train_time:194352ms step_avg:40.15ms
+[2025-09-05 18:36:06] [Rank 0] step:4861/10000 train_time:195010ms step_avg:40.12ms
+[2025-09-05 18:36:06] [Rank 0] step:4881/10000 train_time:195671ms step_avg:40.09ms
+[2025-09-05 18:36:07] [Rank 0] step:4901/10000 train_time:196330ms step_avg:40.06ms
+[2025-09-05 18:36:08] [Rank 0] step:4921/10000 train_time:196988ms step_avg:40.03ms
+[2025-09-05 18:36:08] [Rank 0] step:4941/10000 train_time:197645ms step_avg:40.00ms
+[2025-09-05 18:36:09] [Rank 0] step:4961/10000 train_time:198306ms step_avg:39.97ms
+[2025-09-05 18:36:10] [Rank 0] step:4981/10000 train_time:198960ms step_avg:39.94ms
+[2025-09-05 18:36:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:36:11] [Rank 0] PRINT: step:5000/10000 train_loss:0.7041 val_loss:0.6918 train_time:199853ms step_avg:39.97ms
+[2025-09-05 18:36:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:36:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:37:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:37:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:37:33] [Rank 0] Total Loss: 5.1468
+[2025-09-05 18:37:33] [Rank 0] Total FTA (Unweighted): 0.8181
+[2025-09-05 18:37:33] [Rank 0] Total FTA (Weighted): 0.8181
+[2025-09-05 18:37:33] [Rank 0] Group 0 Loss: 4.9435
+[2025-09-05 18:37:33] [Rank 0] Group 1 Loss: 4.7387
+[2025-09-05 18:37:33] [Rank 0] Group 2 Loss: 4.7682
+[2025-09-05 18:37:33] [Rank 0] Group 3 Loss: 5.0373
+[2025-09-05 18:37:33] [Rank 0] Group 4 Loss: 5.0716
+[2025-09-05 18:37:33] [Rank 0] Group 5 Loss: 5.0227
+[2025-09-05 18:37:33] [Rank 0] Group 6 Loss: 4.9519
+[2025-09-05 18:37:33] [Rank 0] Group 7 Loss: 5.1176
+[2025-09-05 18:37:33] [Rank 0] Group 8 Loss: 5.2145
+[2025-09-05 18:37:33] [Rank 0] Group 9 Loss: 5.1601
+[2025-09-05 18:37:33] [Rank 0] Group 10 Loss: 5.2831
+[2025-09-05 18:37:33] [Rank 0] Group 11 Loss: 5.3295
+[2025-09-05 18:37:33] [Rank 0] Group 12 Loss: 5.3473
+[2025-09-05 18:37:33] [Rank 0] Group 13 Loss: 5.4181
+[2025-09-05 18:37:33] [Rank 0] Group 14 Loss: 5.4034
+[2025-09-05 18:37:33] [Rank 0] Group 15 Loss: 5.5417
+[2025-09-05 18:37:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:37:33] [Rank 0] Group 11 FTA: 0.9400
+[2025-09-05 18:37:33] [Rank 0] Group 12 FTA: 0.6300
+[2025-09-05 18:37:33] [Rank 0] Group 13 FTA: 0.2700
+[2025-09-05 18:37:33] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 18:37:33] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 18:37:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:37:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:37:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:37:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:37:34] [Rank 0] step:5001/10000 train_time:199861ms step_avg:39.96ms
+[2025-09-05 18:37:35] [Rank 0] step:5021/10000 train_time:200307ms step_avg:39.89ms
+[2025-09-05 18:37:36] [Rank 0] step:5041/10000 train_time:200966ms step_avg:39.87ms
+[2025-09-05 18:37:36] [Rank 0] step:5061/10000 train_time:201625ms step_avg:39.84ms
+[2025-09-05 18:37:37] [Rank 0] step:5081/10000 train_time:202284ms step_avg:39.81ms
+[2025-09-05 18:37:38] [Rank 0] step:5101/10000 train_time:202944ms step_avg:39.79ms
+[2025-09-05 18:37:38] [Rank 0] step:5121/10000 train_time:203602ms step_avg:39.76ms
+[2025-09-05 18:37:39] [Rank 0] step:5141/10000 train_time:204261ms step_avg:39.73ms
+[2025-09-05 18:37:40] [Rank 0] step:5161/10000 train_time:204920ms step_avg:39.71ms
+[2025-09-05 18:37:40] [Rank 0] step:5181/10000 train_time:205579ms step_avg:39.68ms
+[2025-09-05 18:37:41] [Rank 0] step:5201/10000 train_time:206237ms step_avg:39.65ms
+[2025-09-05 18:37:42] [Rank 0] step:5221/10000 train_time:206897ms step_avg:39.63ms
+[2025-09-05 18:37:42] [Rank 0] step:5241/10000 train_time:207555ms step_avg:39.60ms
+[2025-09-05 18:37:43] [Rank 0] step:5261/10000 train_time:208214ms step_avg:39.58ms
+[2025-09-05 18:37:44] [Rank 0] step:5281/10000 train_time:208873ms step_avg:39.55ms
+[2025-09-05 18:37:44] [Rank 0] step:5301/10000 train_time:209532ms step_avg:39.53ms
+[2025-09-05 18:37:45] [Rank 0] step:5321/10000 train_time:210191ms step_avg:39.50ms
+[2025-09-05 18:37:46] [Rank 0] step:5341/10000 train_time:210850ms step_avg:39.48ms
+[2025-09-05 18:37:46] [Rank 0] step:5361/10000 train_time:211509ms step_avg:39.45ms
+[2025-09-05 18:37:47] [Rank 0] step:5381/10000 train_time:212168ms step_avg:39.43ms
+[2025-09-05 18:37:48] [Rank 0] step:5401/10000 train_time:212986ms step_avg:39.43ms
+[2025-09-05 18:37:48] [Rank 0] step:5421/10000 train_time:213645ms step_avg:39.41ms
+[2025-09-05 18:37:49] [Rank 0] step:5441/10000 train_time:214305ms step_avg:39.39ms
+[2025-09-05 18:37:50] [Rank 0] step:5461/10000 train_time:214964ms step_avg:39.36ms
+[2025-09-05 18:37:51] [Rank 0] step:5481/10000 train_time:215769ms step_avg:39.37ms
+[2025-09-05 18:37:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:37:52] [Rank 0] PRINT: step:5500/10000 train_loss:0.6947 val_loss:0.6830 train_time:216662ms step_avg:39.39ms
+[2025-09-05 18:37:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:37:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:39:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:39:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:39:13] [Rank 0] Total Loss: 5.1520
+[2025-09-05 18:39:13] [Rank 0] Total FTA (Unweighted): 0.8331
+[2025-09-05 18:39:13] [Rank 0] Total FTA (Weighted): 0.8331
+[2025-09-05 18:39:13] [Rank 0] Group 0 Loss: 4.9892
+[2025-09-05 18:39:13] [Rank 0] Group 1 Loss: 4.6736
+[2025-09-05 18:39:13] [Rank 0] Group 2 Loss: 4.7676
+[2025-09-05 18:39:13] [Rank 0] Group 3 Loss: 5.1181
+[2025-09-05 18:39:13] [Rank 0] Group 4 Loss: 5.1065
+[2025-09-05 18:39:13] [Rank 0] Group 5 Loss: 5.0691
+[2025-09-05 18:39:13] [Rank 0] Group 6 Loss: 4.9222
+[2025-09-05 18:39:13] [Rank 0] Group 7 Loss: 5.1015
+[2025-09-05 18:39:13] [Rank 0] Group 8 Loss: 5.1926
+[2025-09-05 18:39:13] [Rank 0] Group 9 Loss: 5.1483
+[2025-09-05 18:39:13] [Rank 0] Group 10 Loss: 5.2992
+[2025-09-05 18:39:13] [Rank 0] Group 11 Loss: 5.3077
+[2025-09-05 18:39:13] [Rank 0] Group 12 Loss: 5.3493
+[2025-09-05 18:39:13] [Rank 0] Group 13 Loss: 5.4202
+[2025-09-05 18:39:13] [Rank 0] Group 14 Loss: 5.4264
+[2025-09-05 18:39:13] [Rank 0] Group 15 Loss: 5.5403
+[2025-09-05 18:39:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 4 FTA: 0.9900
+[2025-09-05 18:39:13] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:39:13] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 18:39:13] [Rank 0] Group 12 FTA: 0.8000
+[2025-09-05 18:39:13] [Rank 0] Group 13 FTA: 0.3300
+[2025-09-05 18:39:13] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 18:39:13] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 18:39:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:39:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:39:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:39:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:39:15] [Rank 0] step:5501/10000 train_time:216670ms step_avg:39.39ms
+[2025-09-05 18:39:15] [Rank 0] step:5521/10000 train_time:217104ms step_avg:39.32ms
+[2025-09-05 18:39:16] [Rank 0] step:5541/10000 train_time:217762ms step_avg:39.30ms
+[2025-09-05 18:39:17] [Rank 0] step:5561/10000 train_time:218422ms step_avg:39.28ms
+[2025-09-05 18:39:17] [Rank 0] step:5581/10000 train_time:219078ms step_avg:39.25ms
+[2025-09-05 18:39:18] [Rank 0] step:5601/10000 train_time:219737ms step_avg:39.23ms
+[2025-09-05 18:39:19] [Rank 0] step:5621/10000 train_time:220395ms step_avg:39.21ms
+[2025-09-05 18:39:20] [Rank 0] step:5641/10000 train_time:221052ms step_avg:39.19ms
+[2025-09-05 18:39:20] [Rank 0] step:5661/10000 train_time:222191ms step_avg:39.25ms
+[2025-09-05 18:39:21] [Rank 0] step:5681/10000 train_time:222849ms step_avg:39.23ms
+[2025-09-05 18:39:22] [Rank 0] step:5701/10000 train_time:223507ms step_avg:39.20ms
+[2025-09-05 18:39:22] [Rank 0] step:5721/10000 train_time:224167ms step_avg:39.18ms
+[2025-09-05 18:39:23] [Rank 0] step:5741/10000 train_time:224823ms step_avg:39.16ms
+[2025-09-05 18:39:24] [Rank 0] step:5761/10000 train_time:225481ms step_avg:39.14ms
+[2025-09-05 18:39:24] [Rank 0] step:5781/10000 train_time:226139ms step_avg:39.12ms
+[2025-09-05 18:39:25] [Rank 0] step:5801/10000 train_time:226798ms step_avg:39.10ms
+[2025-09-05 18:39:26] [Rank 0] step:5821/10000 train_time:227456ms step_avg:39.08ms
+[2025-09-05 18:39:26] [Rank 0] step:5841/10000 train_time:228114ms step_avg:39.05ms
+[2025-09-05 18:39:27] [Rank 0] step:5861/10000 train_time:228772ms step_avg:39.03ms
+[2025-09-05 18:39:28] [Rank 0] step:5881/10000 train_time:229430ms step_avg:39.01ms
+[2025-09-05 18:39:28] [Rank 0] step:5901/10000 train_time:230088ms step_avg:38.99ms
+[2025-09-05 18:39:29] [Rank 0] step:5921/10000 train_time:230746ms step_avg:38.97ms
+[2025-09-05 18:39:30] [Rank 0] step:5941/10000 train_time:231507ms step_avg:38.97ms
+[2025-09-05 18:39:30] [Rank 0] step:5961/10000 train_time:232165ms step_avg:38.95ms
+[2025-09-05 18:39:31] [Rank 0] step:5981/10000 train_time:232823ms step_avg:38.93ms
+[2025-09-05 18:39:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:39:32] [Rank 0] PRINT: step:6000/10000 train_loss:0.6868 val_loss:0.6759 train_time:233714ms step_avg:38.95ms
+[2025-09-05 18:39:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:39:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:40:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:40:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:40:53] [Rank 0] Total Loss: 5.0785
+[2025-09-05 18:40:53] [Rank 0] Total FTA (Unweighted): 0.8500
+[2025-09-05 18:40:53] [Rank 0] Total FTA (Weighted): 0.8500
+[2025-09-05 18:40:53] [Rank 0] Group 0 Loss: 4.9519
+[2025-09-05 18:40:53] [Rank 0] Group 1 Loss: 4.5407
+[2025-09-05 18:40:53] [Rank 0] Group 2 Loss: 4.5690
+[2025-09-05 18:40:53] [Rank 0] Group 3 Loss: 5.0194
+[2025-09-05 18:40:53] [Rank 0] Group 4 Loss: 5.0016
+[2025-09-05 18:40:53] [Rank 0] Group 5 Loss: 4.9736
+[2025-09-05 18:40:53] [Rank 0] Group 6 Loss: 4.8845
+[2025-09-05 18:40:53] [Rank 0] Group 7 Loss: 5.0186
+[2025-09-05 18:40:53] [Rank 0] Group 8 Loss: 5.1444
+[2025-09-05 18:40:53] [Rank 0] Group 9 Loss: 5.1107
+[2025-09-05 18:40:53] [Rank 0] Group 10 Loss: 5.2452
+[2025-09-05 18:40:53] [Rank 0] Group 11 Loss: 5.2846
+[2025-09-05 18:40:53] [Rank 0] Group 12 Loss: 5.2630
+[2025-09-05 18:40:53] [Rank 0] Group 13 Loss: 5.3892
+[2025-09-05 18:40:54] [Rank 0] Group 14 Loss: 5.3608
+[2025-09-05 18:40:54] [Rank 0] Group 15 Loss: 5.4994
+[2025-09-05 18:40:54] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:40:54] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-05 18:40:54] [Rank 0] Group 12 FTA: 0.8800
+[2025-09-05 18:40:54] [Rank 0] Group 13 FTA: 0.4300
+[2025-09-05 18:40:54] [Rank 0] Group 14 FTA: 0.2000
+[2025-09-05 18:40:54] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 18:40:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:40:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:40:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:40:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:40:56] [Rank 0] step:6001/10000 train_time:233722ms step_avg:38.95ms
+[2025-09-05 18:40:57] [Rank 0] step:6021/10000 train_time:234637ms step_avg:38.97ms
+[2025-09-05 18:40:58] [Rank 0] step:6041/10000 train_time:235434ms step_avg:38.97ms
+[2025-09-05 18:40:58] [Rank 0] step:6061/10000 train_time:236093ms step_avg:38.95ms
+[2025-09-05 18:40:59] [Rank 0] step:6081/10000 train_time:236752ms step_avg:38.93ms
+[2025-09-05 18:41:00] [Rank 0] step:6101/10000 train_time:237623ms step_avg:38.95ms
+[2025-09-05 18:41:00] [Rank 0] step:6121/10000 train_time:238281ms step_avg:38.93ms
+[2025-09-05 18:41:01] [Rank 0] step:6141/10000 train_time:238940ms step_avg:38.91ms
+[2025-09-05 18:41:02] [Rank 0] step:6161/10000 train_time:239599ms step_avg:38.89ms
+[2025-09-05 18:41:02] [Rank 0] step:6181/10000 train_time:240258ms step_avg:38.87ms
+[2025-09-05 18:41:03] [Rank 0] step:6201/10000 train_time:240917ms step_avg:38.85ms
+[2025-09-05 18:41:04] [Rank 0] step:6221/10000 train_time:241577ms step_avg:38.83ms
+[2025-09-05 18:41:04] [Rank 0] step:6241/10000 train_time:242236ms step_avg:38.81ms
+[2025-09-05 18:41:05] [Rank 0] step:6261/10000 train_time:242895ms step_avg:38.79ms
+[2025-09-05 18:41:06] [Rank 0] step:6281/10000 train_time:243555ms step_avg:38.78ms
+[2025-09-05 18:41:06] [Rank 0] step:6301/10000 train_time:244215ms step_avg:38.76ms
+[2025-09-05 18:41:07] [Rank 0] step:6321/10000 train_time:244877ms step_avg:38.74ms
+[2025-09-05 18:41:08] [Rank 0] step:6341/10000 train_time:245533ms step_avg:38.72ms
+[2025-09-05 18:41:08] [Rank 0] step:6361/10000 train_time:246192ms step_avg:38.70ms
+[2025-09-05 18:41:09] [Rank 0] step:6381/10000 train_time:246852ms step_avg:38.69ms
+[2025-09-05 18:41:10] [Rank 0] step:6401/10000 train_time:247510ms step_avg:38.67ms
+[2025-09-05 18:41:10] [Rank 0] step:6421/10000 train_time:248169ms step_avg:38.65ms
+[2025-09-05 18:41:11] [Rank 0] step:6441/10000 train_time:248828ms step_avg:38.63ms
+[2025-09-05 18:41:12] [Rank 0] step:6461/10000 train_time:249486ms step_avg:38.61ms
+[2025-09-05 18:41:12] [Rank 0] step:6481/10000 train_time:250146ms step_avg:38.60ms
+[2025-09-05 18:41:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:41:13] [Rank 0] PRINT: step:6500/10000 train_loss:0.6796 val_loss:0.6692 train_time:251039ms step_avg:38.62ms
+[2025-09-05 18:41:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:41:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:42:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:42:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:42:34] [Rank 0] Total Loss: 5.1818
+[2025-09-05 18:42:34] [Rank 0] Total FTA (Unweighted): 0.8488
+[2025-09-05 18:42:34] [Rank 0] Total FTA (Weighted): 0.8488
+[2025-09-05 18:42:34] [Rank 0] Group 0 Loss: 4.9682
+[2025-09-05 18:42:34] [Rank 0] Group 1 Loss: 4.6881
+[2025-09-05 18:42:34] [Rank 0] Group 2 Loss: 4.7521
+[2025-09-05 18:42:34] [Rank 0] Group 3 Loss: 5.1708
+[2025-09-05 18:42:34] [Rank 0] Group 4 Loss: 5.1422
+[2025-09-05 18:42:34] [Rank 0] Group 5 Loss: 5.0872
+[2025-09-05 18:42:34] [Rank 0] Group 6 Loss: 5.0086
+[2025-09-05 18:42:34] [Rank 0] Group 7 Loss: 5.1394
+[2025-09-05 18:42:34] [Rank 0] Group 8 Loss: 5.2640
+[2025-09-05 18:42:34] [Rank 0] Group 9 Loss: 5.2088
+[2025-09-05 18:42:34] [Rank 0] Group 10 Loss: 5.3199
+[2025-09-05 18:42:34] [Rank 0] Group 11 Loss: 5.3651
+[2025-09-05 18:42:34] [Rank 0] Group 12 Loss: 5.3667
+[2025-09-05 18:42:34] [Rank 0] Group 13 Loss: 5.4397
+[2025-09-05 18:42:34] [Rank 0] Group 14 Loss: 5.4203
+[2025-09-05 18:42:34] [Rank 0] Group 15 Loss: 5.5681
+[2025-09-05 18:42:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:42:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:42:35] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 18:42:35] [Rank 0] Group 11 FTA: 0.9400
+[2025-09-05 18:42:35] [Rank 0] Group 12 FTA: 0.9000
+[2025-09-05 18:42:35] [Rank 0] Group 13 FTA: 0.4900
+[2025-09-05 18:42:35] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 18:42:35] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 18:42:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:42:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:42:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:42:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:42:36] [Rank 0] step:6501/10000 train_time:251047ms step_avg:38.62ms
+[2025-09-05 18:42:37] [Rank 0] step:6521/10000 train_time:251482ms step_avg:38.56ms
+[2025-09-05 18:42:37] [Rank 0] step:6541/10000 train_time:252140ms step_avg:38.55ms
+[2025-09-05 18:42:38] [Rank 0] step:6561/10000 train_time:252798ms step_avg:38.53ms
+[2025-09-05 18:42:39] [Rank 0] step:6581/10000 train_time:253456ms step_avg:38.51ms
+[2025-09-05 18:42:39] [Rank 0] step:6601/10000 train_time:254114ms step_avg:38.50ms
+[2025-09-05 18:42:40] [Rank 0] step:6621/10000 train_time:254772ms step_avg:38.48ms
+[2025-09-05 18:42:41] [Rank 0] step:6641/10000 train_time:255429ms step_avg:38.46ms
+[2025-09-05 18:42:41] [Rank 0] step:6661/10000 train_time:256087ms step_avg:38.45ms
+[2025-09-05 18:42:42] [Rank 0] step:6681/10000 train_time:256745ms step_avg:38.43ms
+[2025-09-05 18:42:43] [Rank 0] step:6701/10000 train_time:257404ms step_avg:38.41ms
+[2025-09-05 18:42:43] [Rank 0] step:6721/10000 train_time:258062ms step_avg:38.40ms
+[2025-09-05 18:42:44] [Rank 0] step:6741/10000 train_time:258721ms step_avg:38.38ms
+[2025-09-05 18:42:45] [Rank 0] step:6761/10000 train_time:259379ms step_avg:38.36ms
+[2025-09-05 18:42:45] [Rank 0] step:6781/10000 train_time:260037ms step_avg:38.35ms
+[2025-09-05 18:42:46] [Rank 0] step:6801/10000 train_time:260696ms step_avg:38.33ms
+[2025-09-05 18:42:47] [Rank 0] step:6821/10000 train_time:261353ms step_avg:38.32ms
+[2025-09-05 18:42:47] [Rank 0] step:6841/10000 train_time:262215ms step_avg:38.33ms
+[2025-09-05 18:42:48] [Rank 0] step:6861/10000 train_time:262873ms step_avg:38.31ms
+[2025-09-05 18:42:49] [Rank 0] step:6881/10000 train_time:263531ms step_avg:38.30ms
+[2025-09-05 18:42:49] [Rank 0] step:6901/10000 train_time:264189ms step_avg:38.28ms
+[2025-09-05 18:42:50] [Rank 0] step:6921/10000 train_time:264847ms step_avg:38.27ms
+[2025-09-05 18:42:51] [Rank 0] step:6941/10000 train_time:265505ms step_avg:38.25ms
+[2025-09-05 18:42:51] [Rank 0] step:6961/10000 train_time:266166ms step_avg:38.24ms
+[2025-09-05 18:42:52] [Rank 0] step:6981/10000 train_time:266822ms step_avg:38.22ms
+[2025-09-05 18:42:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:42:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:42:53] [Rank 0] PRINT: step:7000/10000 train_loss:0.6730 val_loss:0.6627 train_time:267714ms step_avg:38.24ms
+[2025-09-05 18:42:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:42:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:44:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:44:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:44:15] [Rank 0] Total Loss: 5.2266
+[2025-09-05 18:44:15] [Rank 0] Total FTA (Unweighted): 0.8625
+[2025-09-05 18:44:15] [Rank 0] Total FTA (Weighted): 0.8625
+[2025-09-05 18:44:15] [Rank 0] Group 0 Loss: 5.0907
+[2025-09-05 18:44:15] [Rank 0] Group 1 Loss: 4.7326
+[2025-09-05 18:44:15] [Rank 0] Group 2 Loss: 4.7764
+[2025-09-05 18:44:15] [Rank 0] Group 3 Loss: 5.1379
+[2025-09-05 18:44:15] [Rank 0] Group 4 Loss: 5.2085
+[2025-09-05 18:44:15] [Rank 0] Group 5 Loss: 5.1631
+[2025-09-05 18:44:15] [Rank 0] Group 6 Loss: 5.0833
+[2025-09-05 18:44:15] [Rank 0] Group 7 Loss: 5.1626
+[2025-09-05 18:44:15] [Rank 0] Group 8 Loss: 5.3174
+[2025-09-05 18:44:15] [Rank 0] Group 9 Loss: 5.2184
+[2025-09-05 18:44:15] [Rank 0] Group 10 Loss: 5.3952
+[2025-09-05 18:44:15] [Rank 0] Group 11 Loss: 5.3886
+[2025-09-05 18:44:15] [Rank 0] Group 12 Loss: 5.4046
+[2025-09-05 18:44:15] [Rank 0] Group 13 Loss: 5.4976
+[2025-09-05 18:44:15] [Rank 0] Group 14 Loss: 5.4850
+[2025-09-05 18:44:15] [Rank 0] Group 15 Loss: 5.5637
+[2025-09-05 18:44:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:44:15] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 18:44:15] [Rank 0] Group 12 FTA: 0.9500
+[2025-09-05 18:44:15] [Rank 0] Group 13 FTA: 0.5800
+[2025-09-05 18:44:15] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-05 18:44:15] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 18:44:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:44:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:44:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:44:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:44:17] [Rank 0] step:7001/10000 train_time:267722ms step_avg:38.24ms
+[2025-09-05 18:44:17] [Rank 0] step:7021/10000 train_time:268158ms step_avg:38.19ms
+[2025-09-05 18:44:18] [Rank 0] step:7041/10000 train_time:268817ms step_avg:38.18ms
+[2025-09-05 18:44:19] [Rank 0] step:7061/10000 train_time:269476ms step_avg:38.16ms
+[2025-09-05 18:44:19] [Rank 0] step:7081/10000 train_time:270134ms step_avg:38.15ms
+[2025-09-05 18:44:20] [Rank 0] step:7101/10000 train_time:270793ms step_avg:38.13ms
+[2025-09-05 18:44:20] [Rank 0] step:7121/10000 train_time:271452ms step_avg:38.12ms
+[2025-09-05 18:44:21] [Rank 0] step:7141/10000 train_time:272111ms step_avg:38.11ms
+[2025-09-05 18:44:22] [Rank 0] step:7161/10000 train_time:272770ms step_avg:38.09ms
+[2025-09-05 18:44:22] [Rank 0] step:7181/10000 train_time:273430ms step_avg:38.08ms
+[2025-09-05 18:44:23] [Rank 0] step:7201/10000 train_time:274089ms step_avg:38.06ms
+[2025-09-05 18:44:24] [Rank 0] step:7221/10000 train_time:274748ms step_avg:38.05ms
+[2025-09-05 18:44:24] [Rank 0] step:7241/10000 train_time:275406ms step_avg:38.03ms
+[2025-09-05 18:44:25] [Rank 0] step:7261/10000 train_time:276066ms step_avg:38.02ms
+[2025-09-05 18:44:26] [Rank 0] step:7281/10000 train_time:276725ms step_avg:38.01ms
+[2025-09-05 18:44:26] [Rank 0] step:7301/10000 train_time:277384ms step_avg:37.99ms
+[2025-09-05 18:44:27] [Rank 0] step:7321/10000 train_time:278043ms step_avg:37.98ms
+[2025-09-05 18:44:28] [Rank 0] step:7341/10000 train_time:278702ms step_avg:37.97ms
+[2025-09-05 18:44:28] [Rank 0] step:7361/10000 train_time:279361ms step_avg:37.95ms
+[2025-09-05 18:44:29] [Rank 0] step:7381/10000 train_time:280021ms step_avg:37.94ms
+[2025-09-05 18:44:30] [Rank 0] step:7401/10000 train_time:280680ms step_avg:37.92ms
+[2025-09-05 18:44:30] [Rank 0] step:7421/10000 train_time:281339ms step_avg:37.91ms
+[2025-09-05 18:44:31] [Rank 0] step:7441/10000 train_time:281998ms step_avg:37.90ms
+[2025-09-05 18:44:32] [Rank 0] step:7461/10000 train_time:282657ms step_avg:37.88ms
+[2025-09-05 18:44:32] [Rank 0] step:7481/10000 train_time:283315ms step_avg:37.87ms
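The detailed-evaluation blocks above report both an unweighted and a weighted total FTA, and the two always agree in this run. That is expected: the fixed-eval set is built with per_group_k = 100 samples for each of the 16 groups (hence "Fixed-eval set loaded with 1600 samples"), so the sample-weighted mean of the per-group FTAs reduces to the plain mean. A minimal sketch of that bookkeeping, using the step-7000 values; the variable names are illustrative, not taken from the training script:

# Why "Total FTA (Weighted)" equals "Total FTA (Unweighted)" in these logs:
# every group contributes the same number of fixed-eval samples (per_group_k = 100).
group_fta = [1.0] * 11 + [0.96, 0.95, 0.58, 0.19, 0.12]  # Groups 0-15 at step 7000
group_size = [100] * 16                                   # 16 * 100 = 1600 samples

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_size)) / sum(group_size)
print(f"{unweighted:.4f} {weighted:.4f}")  # 0.8625 0.8625, matching the log

With equal group sizes the two summaries only diverge if some groups contribute more samples than others, which never happens with this fixed-eval construction.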
+[2025-09-05 18:44:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:44:33] [Rank 0] PRINT: step:7500/10000 train_loss:0.6667 val_loss:0.6575 train_time:284208ms step_avg:37.89ms
+[2025-09-05 18:44:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:44:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:45:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:45:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:45:55] [Rank 0] Total Loss: 5.2575
+[2025-09-05 18:45:55] [Rank 0] Total FTA (Unweighted): 0.8694
+[2025-09-05 18:45:55] [Rank 0] Total FTA (Weighted): 0.8694
+[2025-09-05 18:45:55] [Rank 0] Group 0 Loss: 5.1974
+[2025-09-05 18:45:55] [Rank 0] Group 1 Loss: 4.6808
+[2025-09-05 18:45:55] [Rank 0] Group 2 Loss: 4.7170
+[2025-09-05 18:45:55] [Rank 0] Group 3 Loss: 5.2600
+[2025-09-05 18:45:55] [Rank 0] Group 4 Loss: 5.2528
+[2025-09-05 18:45:55] [Rank 0] Group 5 Loss: 5.2000
+[2025-09-05 18:45:55] [Rank 0] Group 6 Loss: 5.0768
+[2025-09-05 18:45:55] [Rank 0] Group 7 Loss: 5.2001
+[2025-09-05 18:45:55] [Rank 0] Group 8 Loss: 5.3366
+[2025-09-05 18:45:55] [Rank 0] Group 9 Loss: 5.2704
+[2025-09-05 18:45:55] [Rank 0] Group 10 Loss: 5.4469
+[2025-09-05 18:45:55] [Rank 0] Group 11 Loss: 5.4413
+[2025-09-05 18:45:55] [Rank 0] Group 12 Loss: 5.4351
+[2025-09-05 18:45:55] [Rank 0] Group 13 Loss: 5.5234
+[2025-09-05 18:45:55] [Rank 0] Group 14 Loss: 5.4898
+[2025-09-05 18:45:55] [Rank 0] Group 15 Loss: 5.5908
+[2025-09-05 18:45:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:45:55] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 18:45:55] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-05 18:45:55] [Rank 0] Group 12 FTA: 0.9600
+[2025-09-05 18:45:55] [Rank 0] Group 13 FTA: 0.6400
+[2025-09-05 18:45:55] [Rank 0] Group 14 FTA: 0.2300
+[2025-09-05 18:45:55] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 18:45:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:45:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:45:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:45:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:45:57] [Rank 0] step:7501/10000 train_time:284216ms step_avg:37.89ms
+[2025-09-05 18:45:57] [Rank 0] step:7521/10000 train_time:284657ms step_avg:37.85ms
+[2025-09-05 18:45:58] [Rank 0] step:7541/10000 train_time:285315ms step_avg:37.84ms
+[2025-09-05 18:45:59] [Rank 0] step:7561/10000 train_time:285973ms step_avg:37.82ms
+[2025-09-05 18:45:59] [Rank 0] step:7581/10000 train_time:286631ms step_avg:37.81ms
+[2025-09-05 18:46:00] [Rank 0] step:7601/10000 train_time:287288ms step_avg:37.80ms
+[2025-09-05 18:46:01] [Rank 0] step:7621/10000 train_time:287947ms step_avg:37.78ms
+[2025-09-05 18:46:02] [Rank 0] step:7641/10000 train_time:288604ms step_avg:37.77ms
+[2025-09-05 18:46:03] [Rank 0] step:7661/10000 train_time:289735ms step_avg:37.82ms
+[2025-09-05 18:46:03] [Rank 0] step:7681/10000 train_time:290392ms step_avg:37.81ms
+[2025-09-05 18:46:04] [Rank 0] step:7701/10000 train_time:291050ms step_avg:37.79ms
+[2025-09-05 18:46:05] [Rank 0] step:7721/10000 train_time:291707ms step_avg:37.78ms
+[2025-09-05 18:46:05] [Rank 0] step:7741/10000 train_time:292365ms step_avg:37.77ms
+[2025-09-05 18:46:06] [Rank 0] step:7761/10000 train_time:293024ms step_avg:37.76ms
+[2025-09-05 18:46:06] [Rank 0] step:7781/10000 train_time:293681ms step_avg:37.74ms
+[2025-09-05 18:46:07] [Rank 0] step:7801/10000 train_time:294340ms step_avg:37.73ms
+[2025-09-05 18:46:08] [Rank 0] step:7821/10000 train_time:294997ms step_avg:37.72ms
+[2025-09-05 18:46:08] [Rank 0] step:7841/10000 train_time:295655ms step_avg:37.71ms
+[2025-09-05 18:46:09] [Rank 0] step:7861/10000 train_time:296313ms step_avg:37.69ms
+[2025-09-05 18:46:10] [Rank 0] step:7881/10000 train_time:296970ms step_avg:37.68ms
+[2025-09-05 18:46:10] [Rank 0] step:7901/10000 train_time:297628ms step_avg:37.67ms
+[2025-09-05 18:46:11] [Rank 0] step:7921/10000 train_time:298286ms step_avg:37.66ms
+[2025-09-05 18:46:12] [Rank 0] step:7941/10000 train_time:298944ms step_avg:37.65ms
+[2025-09-05 18:46:12] [Rank 0] step:7961/10000 train_time:299602ms step_avg:37.63ms
+[2025-09-05 18:46:13] [Rank 0] step:7981/10000 train_time:300448ms step_avg:37.65ms
+[2025-09-05 18:46:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:46:14] [Rank 0] PRINT: step:8000/10000 train_loss:0.6609 val_loss:0.6518 train_time:301340ms step_avg:37.67ms
+[2025-09-05 18:46:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:46:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:47:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:47:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:47:36] [Rank 0] Total Loss: 5.2177
+[2025-09-05 18:47:36] [Rank 0] Total FTA (Unweighted): 0.8756
+[2025-09-05 18:47:36] [Rank 0] Total FTA (Weighted): 0.8756
+[2025-09-05 18:47:36] [Rank 0] Group 0 Loss: 5.0831
+[2025-09-05 18:47:36] [Rank 0] Group 1 Loss: 4.7835
+[2025-09-05 18:47:36] [Rank 0] Group 2 Loss: 4.7997
+[2025-09-05 18:47:36] [Rank 0] Group 3 Loss: 5.1487
+[2025-09-05 18:47:36] [Rank 0] Group 4 Loss: 5.2441
+[2025-09-05 18:47:36] [Rank 0] Group 5 Loss: 5.1242
+[2025-09-05 18:47:36] [Rank 0] Group 6 Loss: 5.0641
+[2025-09-05 18:47:36] [Rank 0] Group 7 Loss: 5.1712
+[2025-09-05 18:47:36] [Rank 0] Group 8 Loss: 5.3003
+[2025-09-05 18:47:36] [Rank 0] Group 9 Loss: 5.2230
+[2025-09-05 18:47:36] [Rank 0] Group 10 Loss: 5.3888
+[2025-09-05 18:47:36] [Rank 0] Group 11 Loss: 5.3680
+[2025-09-05 18:47:36] [Rank 0] Group 12 Loss: 5.3970
+[2025-09-05 18:47:36] [Rank 0] Group 13 Loss: 5.4665
+[2025-09-05 18:47:36] [Rank 0] Group 14 Loss: 5.4237
+[2025-09-05 18:47:36] [Rank 0] Group 15 Loss: 5.4973
+[2025-09-05 18:47:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 7 FTA: 0.9700
+[2025-09-05 18:47:36] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:47:36] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-05 18:47:36] [Rank 0] Group 12 FTA: 0.9700
+[2025-09-05 18:47:36] [Rank 0] Group 13 FTA: 0.6700
+[2025-09-05 18:47:36] [Rank 0] Group 14 FTA: 0.2800
+[2025-09-05 18:47:36] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 18:47:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:47:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:47:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:47:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:47:38] [Rank 0] step:8001/10000 train_time:301348ms step_avg:37.66ms
+[2025-09-05 18:47:39] [Rank 0] step:8021/10000 train_time:301794ms step_avg:37.63ms
+[2025-09-05 18:47:40] [Rank 0] step:8041/10000 train_time:302503ms step_avg:37.62ms
+[2025-09-05 18:47:41] [Rank 0] step:8061/10000 train_time:303162ms step_avg:37.61ms
+[2025-09-05 18:47:41] [Rank 0] step:8081/10000 train_time:303821ms step_avg:37.60ms
+[2025-09-05 18:47:42] [Rank 0] step:8101/10000 train_time:304480ms step_avg:37.59ms
+[2025-09-05 18:47:42] [Rank 0] step:8121/10000 train_time:305139ms step_avg:37.57ms
+[2025-09-05 18:47:43] [Rank 0] step:8141/10000 train_time:305798ms step_avg:37.56ms
+[2025-09-05 18:47:44] [Rank 0] step:8161/10000 train_time:306457ms step_avg:37.55ms
+[2025-09-05 18:47:44] [Rank 0] step:8181/10000 train_time:307116ms step_avg:37.54ms
+[2025-09-05 18:47:45] [Rank 0] step:8201/10000 train_time:307775ms step_avg:37.53ms
+[2025-09-05 18:47:46] [Rank 0] step:8221/10000 train_time:308434ms step_avg:37.52ms
+[2025-09-05 18:47:46] [Rank 0] step:8241/10000 train_time:309093ms step_avg:37.51ms
+[2025-09-05 18:47:47] [Rank 0] step:8261/10000 train_time:309752ms step_avg:37.50ms
+[2025-09-05 18:47:48] [Rank 0] step:8281/10000 train_time:310411ms step_avg:37.48ms
+[2025-09-05 18:47:48] [Rank 0] step:8301/10000 train_time:311071ms step_avg:37.47ms
+[2025-09-05 18:47:49] [Rank 0] step:8321/10000 train_time:311730ms step_avg:37.46ms
+[2025-09-05 18:47:50] [Rank 0] step:8341/10000 train_time:312389ms step_avg:37.45ms
+[2025-09-05 18:47:50] [Rank 0] step:8361/10000 train_time:313049ms step_avg:37.44ms
+[2025-09-05 18:47:51] [Rank 0] step:8381/10000 train_time:313708ms step_avg:37.43ms
+[2025-09-05 18:47:52] [Rank 0] step:8401/10000 train_time:314367ms step_avg:37.42ms
+[2025-09-05 18:47:52] [Rank 0] step:8421/10000 train_time:315026ms step_avg:37.41ms
+[2025-09-05 18:47:53] [Rank 0] step:8441/10000 train_time:315684ms step_avg:37.40ms
+[2025-09-05 18:47:54] [Rank 0] step:8461/10000 train_time:316344ms step_avg:37.39ms
+[2025-09-05 18:47:54] [Rank 0] step:8481/10000 train_time:317005ms step_avg:37.38ms
+[2025-09-05 18:47:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:47:55] [Rank 0] PRINT: step:8500/10000 train_loss:0.6553 val_loss:0.6470 train_time:317897ms step_avg:37.40ms
+[2025-09-05 18:47:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:47:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:49:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:49:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:49:17] [Rank 0] Total Loss: 5.2565
+[2025-09-05 18:49:17] [Rank 0] Total FTA (Unweighted): 0.8875
+[2025-09-05 18:49:17] [Rank 0] Total FTA (Weighted): 0.8875
+[2025-09-05 18:49:17] [Rank 0] Group 0 Loss: 5.2308
+[2025-09-05 18:49:17] [Rank 0] Group 1 Loss: 4.8141
+[2025-09-05 18:49:17] [Rank 0] Group 2 Loss: 4.8091
+[2025-09-05 18:49:17] [Rank 0] Group 3 Loss: 5.2311
+[2025-09-05 18:49:17] [Rank 0] Group 4 Loss: 5.2427
+[2025-09-05 18:49:17] [Rank 0] Group 5 Loss: 5.1618
+[2025-09-05 18:49:17] [Rank 0] Group 6 Loss: 5.1129
+[2025-09-05 18:49:17] [Rank 0] Group 7 Loss: 5.2107
+[2025-09-05 18:49:17] [Rank 0] Group 8 Loss: 5.3229
+[2025-09-05 18:49:17] [Rank 0] Group 9 Loss: 5.2669
+[2025-09-05 18:49:17] [Rank 0] Group 10 Loss: 5.3762
+[2025-09-05 18:49:17] [Rank 0] Group 11 Loss: 5.4202
+[2025-09-05 18:49:17] [Rank 0] Group 12 Loss: 5.4213
+[2025-09-05 18:49:17] [Rank 0] Group 13 Loss: 5.4865
+[2025-09-05 18:49:17] [Rank 0] Group 14 Loss: 5.4488
+[2025-09-05 18:49:17] [Rank 0] Group 15 Loss: 5.5486
+[2025-09-05 18:49:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-05 18:49:17] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 18:49:17] [Rank 0] Group 13 FTA: 0.7600
+[2025-09-05 18:49:17] [Rank 0] Group 14 FTA: 0.3100
+[2025-09-05 18:49:17] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 18:49:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:49:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:49:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:49:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:49:19] [Rank 0] step:8501/10000 train_time:317906ms step_avg:37.40ms
+[2025-09-05 18:49:19] [Rank 0] step:8521/10000 train_time:318342ms step_avg:37.36ms
+[2025-09-05 18:49:20] [Rank 0] step:8541/10000 train_time:318999ms step_avg:37.35ms
+[2025-09-05 18:49:21] [Rank 0] step:8561/10000 train_time:319657ms step_avg:37.34ms
+[2025-09-05 18:49:21] [Rank 0] step:8581/10000 train_time:320314ms step_avg:37.33ms
+[2025-09-05 18:49:22] [Rank 0] step:8601/10000 train_time:320972ms step_avg:37.32ms
+[2025-09-05 18:49:23] [Rank 0] step:8621/10000 train_time:321630ms step_avg:37.31ms
+[2025-09-05 18:49:23] [Rank 0] step:8641/10000 train_time:322444ms step_avg:37.32ms
+[2025-09-05 18:49:24] [Rank 0] step:8661/10000 train_time:323102ms step_avg:37.31ms
+[2025-09-05 18:49:25] [Rank 0] step:8681/10000 train_time:323761ms step_avg:37.30ms
+[2025-09-05 18:49:25] [Rank 0] step:8701/10000 train_time:324535ms step_avg:37.30ms
+[2025-09-05 18:49:26] [Rank 0] step:8721/10000 train_time:325194ms step_avg:37.29ms
+[2025-09-05 18:49:27] [Rank 0] step:8741/10000 train_time:325852ms step_avg:37.28ms
+[2025-09-05 18:49:27] [Rank 0] step:8761/10000 train_time:326510ms step_avg:37.27ms
+[2025-09-05 18:49:28] [Rank 0] step:8781/10000 train_time:327168ms step_avg:37.26ms
+[2025-09-05 18:49:29] [Rank 0] step:8801/10000 train_time:327826ms step_avg:37.25ms
+[2025-09-05 18:49:29] [Rank 0] step:8821/10000 train_time:328485ms step_avg:37.24ms
+[2025-09-05 18:49:30] [Rank 0] step:8841/10000 train_time:329237ms step_avg:37.24ms
+[2025-09-05 18:49:31] [Rank 0] step:8861/10000 train_time:329895ms step_avg:37.23ms
+[2025-09-05 18:49:32] [Rank 0] step:8881/10000 train_time:330553ms step_avg:37.22ms
+[2025-09-05 18:49:32] [Rank 0] step:8901/10000 train_time:331212ms step_avg:37.21ms
+[2025-09-05 18:49:33] [Rank 0] step:8921/10000 train_time:331871ms step_avg:37.20ms
+[2025-09-05 18:49:33] [Rank 0] step:8941/10000 train_time:332527ms step_avg:37.19ms
+[2025-09-05 18:49:34] [Rank 0] step:8961/10000 train_time:333185ms step_avg:37.18ms
+[2025-09-05 18:49:35] [Rank 0] step:8981/10000 train_time:333843ms step_avg:37.17ms
+[2025-09-05 18:49:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:49:36] [Rank 0] PRINT: step:9000/10000 train_loss:0.6502 val_loss:0.6416 train_time:334735ms step_avg:37.19ms
+[2025-09-05 18:49:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:49:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:50:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:50:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:50:58] [Rank 0] Total Loss: 5.2074
+[2025-09-05 18:50:58] [Rank 0] Total FTA (Unweighted): 0.8962
+[2025-09-05 18:50:58] [Rank 0] Total FTA (Weighted): 0.8962
+[2025-09-05 18:50:58] [Rank 0] Group 0 Loss: 5.2078
+[2025-09-05 18:50:58] [Rank 0] Group 1 Loss: 4.7663
+[2025-09-05 18:50:58] [Rank 0] Group 2 Loss: 4.6919
+[2025-09-05 18:50:58] [Rank 0] Group 3 Loss: 5.1578
+[2025-09-05 18:50:58] [Rank 0] Group 4 Loss: 5.1988
+[2025-09-05 18:50:58] [Rank 0] Group 5 Loss: 5.1550
+[2025-09-05 18:50:58] [Rank 0] Group 6 Loss: 5.0605
+[2025-09-05 18:50:58] [Rank 0] Group 7 Loss: 5.1303
+[2025-09-05 18:50:58] [Rank 0] Group 8 Loss: 5.2873
+[2025-09-05 18:50:58] [Rank 0] Group 9 Loss: 5.2299
+[2025-09-05 18:50:58] [Rank 0] Group 10 Loss: 5.3344
+[2025-09-05 18:50:58] [Rank 0] Group 11 Loss: 5.3906
+[2025-09-05 18:50:58] [Rank 0] Group 12 Loss: 5.3782
+[2025-09-05 18:50:58] [Rank 0] Group 13 Loss: 5.4606
+[2025-09-05 18:50:58] [Rank 0] Group 14 Loss: 5.3927
+[2025-09-05 18:50:58] [Rank 0] Group 15 Loss: 5.4759
+[2025-09-05 18:50:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 18:50:58] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 18:50:58] [Rank 0] Group 13 FTA: 0.8400
+[2025-09-05 18:50:58] [Rank 0] Group 14 FTA: 0.3500
+[2025-09-05 18:50:58] [Rank 0] Group 15 FTA: 0.1900
+[2025-09-05 18:50:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:50:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:50:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:50:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:50:59] [Rank 0] step:9001/10000 train_time:334742ms step_avg:37.19ms
+[2025-09-05 18:51:00] [Rank 0] step:9021/10000 train_time:335175ms step_avg:37.15ms
+[2025-09-05 18:51:00] [Rank 0] step:9041/10000 train_time:335834ms step_avg:37.15ms
+[2025-09-05 18:51:01] [Rank 0] step:9061/10000 train_time:336494ms step_avg:37.14ms
+[2025-09-05 18:51:02] [Rank 0] step:9081/10000 train_time:337152ms step_avg:37.13ms
+[2025-09-05 18:51:02] [Rank 0] step:9101/10000 train_time:337811ms step_avg:37.12ms
+[2025-09-05 18:51:03] [Rank 0] step:9121/10000 train_time:338470ms step_avg:37.11ms
+[2025-09-05 18:51:04] [Rank 0] step:9141/10000 train_time:339128ms step_avg:37.10ms
+[2025-09-05 18:51:04] [Rank 0] step:9161/10000 train_time:339787ms step_avg:37.09ms
+[2025-09-05 18:51:05] [Rank 0] step:9181/10000 train_time:340445ms step_avg:37.08ms
+[2025-09-05 18:51:06] [Rank 0] step:9201/10000 train_time:341104ms step_avg:37.07ms
+[2025-09-05 18:51:06] [Rank 0] step:9221/10000 train_time:341763ms step_avg:37.06ms
+[2025-09-05 18:51:07] [Rank 0] step:9241/10000 train_time:342421ms step_avg:37.05ms
+[2025-09-05 18:51:08] [Rank 0] step:9261/10000 train_time:343079ms step_avg:37.05ms
+[2025-09-05 18:51:08] [Rank 0] step:9281/10000 train_time:343738ms step_avg:37.04ms
+[2025-09-05 18:51:09] [Rank 0] step:9301/10000 train_time:344397ms step_avg:37.03ms
+[2025-09-05 18:51:10] [Rank 0] step:9321/10000 train_time:345056ms step_avg:37.02ms
+[2025-09-05 18:51:10] [Rank 0] step:9341/10000 train_time:345715ms step_avg:37.01ms
+[2025-09-05 18:51:11] [Rank 0] step:9361/10000 train_time:346373ms step_avg:37.00ms
+[2025-09-05 18:51:12] [Rank 0] step:9381/10000 train_time:347032ms step_avg:36.99ms
+[2025-09-05 18:51:12] [Rank 0] step:9401/10000 train_time:347692ms step_avg:36.98ms
+[2025-09-05 18:51:13] [Rank 0] step:9421/10000 train_time:348348ms step_avg:36.98ms
+[2025-09-05 18:51:14] [Rank 0] step:9441/10000 train_time:349007ms step_avg:36.97ms
+[2025-09-05 18:51:14] [Rank 0] step:9461/10000 train_time:349666ms step_avg:36.96ms
+[2025-09-05 18:51:15] [Rank 0] step:9481/10000 train_time:350325ms step_avg:36.95ms
+[2025-09-05 18:51:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:51:16] [Rank 0] PRINT: step:9500/10000 train_loss:0.6449 val_loss:0.6373 train_time:351219ms step_avg:36.97ms
+[2025-09-05 18:51:16] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:51:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:52:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:52:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:52:38] [Rank 0] Total Loss: 5.2542
+[2025-09-05 18:52:38] [Rank 0] Total FTA (Unweighted): 0.9006
+[2025-09-05 18:52:38] [Rank 0] Total FTA (Weighted): 0.9006
+[2025-09-05 18:52:38] [Rank 0] Group 0 Loss: 5.1416
+[2025-09-05 18:52:38] [Rank 0] Group 1 Loss: 4.7494
+[2025-09-05 18:52:38] [Rank 0] Group 2 Loss: 4.7531
+[2025-09-05 18:52:38] [Rank 0] Group 3 Loss: 5.1969
+[2025-09-05 18:52:38] [Rank 0] Group 4 Loss: 5.2883
+[2025-09-05 18:52:38] [Rank 0] Group 5 Loss: 5.1861
+[2025-09-05 18:52:38] [Rank 0] Group 6 Loss: 5.1226
+[2025-09-05 18:52:38] [Rank 0] Group 7 Loss: 5.1915
+[2025-09-05 18:52:38] [Rank 0] Group 8 Loss: 5.3489
+[2025-09-05 18:52:38] [Rank 0] Group 9 Loss: 5.2942
+[2025-09-05 18:52:38] [Rank 0] Group 10 Loss: 5.4106
+[2025-09-05 18:52:38] [Rank 0] Group 11 Loss: 5.4492
+[2025-09-05 18:52:38] [Rank 0] Group 12 Loss: 5.4435
+[2025-09-05 18:52:38] [Rank 0] Group 13 Loss: 5.4871
+[2025-09-05 18:52:38] [Rank 0] Group 14 Loss: 5.4519
+[2025-09-05 18:52:38] [Rank 0] Group 15 Loss: 5.5514
+[2025-09-05 18:52:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 18:52:38] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 11 FTA: 0.9700
+[2025-09-05 18:52:38] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 18:52:38] [Rank 0] Group 13 FTA: 0.8500
+[2025-09-05 18:52:38] [Rank 0] Group 14 FTA: 0.4600
+[2025-09-05 18:52:38] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-05 18:52:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:52:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:52:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:52:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:52:39] [Rank 0] step:9501/10000 train_time:351227ms step_avg:36.97ms
+[2025-09-05 18:52:40] [Rank 0] step:9521/10000 train_time:351663ms step_avg:36.94ms
+[2025-09-05 18:52:41] [Rank 0] step:9541/10000 train_time:352321ms step_avg:36.93ms
+[2025-09-05 18:52:41] [Rank 0] step:9561/10000 train_time:352978ms step_avg:36.92ms
+[2025-09-05 18:52:42] [Rank 0] step:9581/10000 train_time:353636ms step_avg:36.91ms
+[2025-09-05 18:52:43] [Rank 0] step:9601/10000 train_time:354293ms step_avg:36.90ms
+[2025-09-05 18:52:43] [Rank 0] step:9621/10000 train_time:354951ms step_avg:36.89ms
+[2025-09-05 18:52:44] [Rank 0] step:9641/10000 train_time:355609ms step_avg:36.89ms
+[2025-09-05 18:52:45] [Rank 0] step:9661/10000 train_time:356972ms step_avg:36.95ms
+[2025-09-05 18:52:46] [Rank 0] step:9681/10000 train_time:357629ms step_avg:36.94ms
+[2025-09-05 18:52:47] [Rank 0] step:9701/10000 train_time:358287ms step_avg:36.93ms
+[2025-09-05 18:52:47] [Rank 0] step:9721/10000 train_time:358944ms step_avg:36.92ms
+[2025-09-05 18:52:48] [Rank 0] step:9741/10000 train_time:359601ms step_avg:36.92ms
+[2025-09-05 18:52:49] [Rank 0] step:9761/10000 train_time:360259ms step_avg:36.91ms
+[2025-09-05 18:52:49] [Rank 0] step:9781/10000 train_time:360917ms step_avg:36.90ms
+[2025-09-05 18:52:50] [Rank 0] step:9801/10000 train_time:361575ms step_avg:36.89ms
+[2025-09-05 18:52:50] [Rank 0] step:9821/10000 train_time:362233ms step_avg:36.88ms
+[2025-09-05 18:52:51] [Rank 0] step:9841/10000 train_time:362891ms step_avg:36.88ms
+[2025-09-05 18:52:52] [Rank 0] step:9861/10000 train_time:363548ms step_avg:36.87ms
+[2025-09-05 18:52:52] [Rank 0] step:9881/10000 train_time:364205ms step_avg:36.86ms
+[2025-09-05 18:52:53] [Rank 0] step:9901/10000 train_time:364863ms step_avg:36.85ms
+[2025-09-05 18:52:54] [Rank 0] step:9921/10000 train_time:365521ms step_avg:36.84ms
+[2025-09-05 18:52:54] [Rank 0] step:9941/10000 train_time:366179ms step_avg:36.84ms
+[2025-09-05 18:52:55] [Rank 0] step:9961/10000 train_time:366836ms step_avg:36.83ms
+[2025-09-05 18:52:56] [Rank 0] step:9981/10000 train_time:367494ms step_avg:36.82ms
+[2025-09-05 18:52:56] [Rank 0] step:10000/10000 train_time:368120ms step_avg:36.81ms
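In the timing lines, step_avg is simply the cumulative train_time divided by the step index, e.g. 368120 ms / 10000 steps = 36.81 ms at the final step. A one-line check of that reading; the function name is illustrative, not from the training script:

def step_avg_ms(train_time_ms: float, step: int) -> float:
    # step_avg as logged: cumulative training wall-clock over completed steps
    return train_time_ms / step

assert f"{step_avg_ms(368120, 10000):.2f}" == "36.81"  # step:10000 above
assert f"{step_avg_ms(267722, 7001):.2f}" == "38.24"   # step:7001 earlier in the log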
+[2025-09-05 18:52:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:52:57] [Rank 0] PRINT: step:10000/10000 train_loss:0.6401 val_loss:0.6331 train_time:368392ms step_avg:36.84ms
+[2025-09-05 18:52:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:52:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:54:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:54:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:54:18] [Rank 0] Total Loss: 5.2239
+[2025-09-05 18:54:18] [Rank 0] Total FTA (Unweighted): 0.9144
+[2025-09-05 18:54:18] [Rank 0] Total FTA (Weighted): 0.9144
+[2025-09-05 18:54:18] [Rank 0] Group 0 Loss: 5.1424
+[2025-09-05 18:54:18] [Rank 0] Group 1 Loss: 4.7741
+[2025-09-05 18:54:18] [Rank 0] Group 2 Loss: 4.7523
+[2025-09-05 18:54:18] [Rank 0] Group 3 Loss: 5.1813
+[2025-09-05 18:54:18] [Rank 0] Group 4 Loss: 5.2099
+[2025-09-05 18:54:18] [Rank 0] Group 5 Loss: 5.1560
+[2025-09-05 18:54:18] [Rank 0] Group 6 Loss: 5.0920
+[2025-09-05 18:54:18] [Rank 0] Group 7 Loss: 5.1413
+[2025-09-05 18:54:18] [Rank 0] Group 8 Loss: 5.3091
+[2025-09-05 18:54:18] [Rank 0] Group 9 Loss: 5.2498
+[2025-09-05 18:54:18] [Rank 0] Group 10 Loss: 5.3786
+[2025-09-05 18:54:18] [Rank 0] Group 11 Loss: 5.3940
+[2025-09-05 18:54:18] [Rank 0] Group 12 Loss: 5.4107
+[2025-09-05 18:54:18] [Rank 0] Group 13 Loss: 5.4615
+[2025-09-05 18:54:18] [Rank 0] Group 14 Loss: 5.4133
+[2025-09-05 18:54:18] [Rank 0] Group 15 Loss: 5.5168
+[2025-09-05 18:54:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 18:54:18] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 18:54:18] [Rank 0] Group 13 FTA: 0.9100
+[2025-09-05 18:54:18] [Rank 0] Group 14 FTA: 0.5300
+[2025-09-05 18:54:18] [Rank 0] Group 15 FTA: 0.2100
+[2025-09-05 18:54:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-09-05 18:54:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-09-05 18:54:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_loss_curve.png
+[2025-09-05 18:54:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.005_seed_43/total_acc_curve.png
+[2025-09-05 18:54:20] [Rank 0] step:10001/10000 train_time:368400ms step_avg:36.84ms
+[2025-09-05 18:54:20] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 18:54:20 2025 ---
+[2025-09-05 18:54:20] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB
b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.01, + "base_dir": "logs_qa_adam_gated/lr_search_long", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "be7abe2c-b9d2-410b-b9e0-5c7b92f89df6", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 
121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 
1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 
850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 
756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..055dab9c77f7cea86a7d6bf120d48c75748d236a --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:354ffcc7f29078995b07b30c3f5c10b8629d194524cfcbafc12255ce945f72f8 +size 365692 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..f8b3767ff6c19c56477587ff429f59db6ce7690b --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11519f5d88141c7562b4dabf0be263f7a2a32c61b931fd38f9f16b5435505765 +size 437611 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..62cd65361b0e1947236de76018e47e3b84fe50f3 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f1e24dfe7b16b3a6c2c726f5242456c304ca8aa05ce32e08397671579c7425e +size 98034 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..56e1ee3fdefc0863c5a755abd1a0f983c07198d1 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca6a332b50024b89d7166592478e91642c666a560398b587552d42509d749bdf +size 111656 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/training_log_be7abe2c-b9d2-410b-b9e0-5c7b92f89df6.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/training_log_be7abe2c-b9d2-410b-b9e0-5c7b92f89df6.txt new file mode 100644 index 0000000000000000000000000000000000000000..9e077c9644da31fc7ad0fa6f51e5f085c760cc80 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/training_log_be7abe2c-b9d2-410b-b9e0-5c7b92f89df6.txt @@ -0,0 +1,5614 @@ +[2025-09-05 14:43:50] [Rank 0] PRINT: 
--- Script Start: Fri Sep 5 14:43:50 2025 --- +[2025-09-05 14:43:50] [Rank 0] PRINT: --- Script Start: Fri Sep 5 14:43:50 2025 --- +[2025-09-05 14:43:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.01, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 14:43:50] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.01, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 14:43:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 14:43:50] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 14:43:50] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-05 14:43:50] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-05 14:43:50] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42 +[2025-09-05 14:43:50] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42 +[2025-09-05 14:43:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 
20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
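# --- Illustrative aside (sketch; not part of the logged script) ---
# Hyperparameters above is decorated with @dataclass but declares plain class
# attributes without type annotations, so the dataclass machinery registers no
# fields; this is why the log prints "Hyperparameters: Hyperparameters()" with
# an empty repr, and why the config dump further below reads values off the
# class __dict__ rather than calling dataclasses.asdict(). A minimal
# standalone sketch of that convention (all names here are hypothetical):
from dataclasses import dataclass

@dataclass
class HyperparametersSketch:
    val_tokens = 491520      # plain class attributes, not dataclass fields
    num_iterations = 10000
    cooldown_frac = 0.8

def config_dict_sketch(cli_namespace, hp_cls=HyperparametersSketch):
    # same shape as the config.json files added by this diff
    return {
        "cli_args": vars(cli_namespace),
        "hyperparameters": {k: v for k, v in hp_cls.__dict__.items()
                            if not k.startswith("__") and not callable(v)},
    }

# repr(HyperparametersSketch()) == "HyperparametersSketch()": no init fields,
# matching the bare "Hyperparameters()" line seen in these training logs.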
+torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write the log line exactly once; an unconditional second write here is what duplicated every line of the training logs above + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
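# --- Illustrative aside (sketch; not part of the logged script) ---
# generate_powerlaw_selection_counts, defined just below, builds the skewed
# class distribution behind the per-group numbers in this log: group 0 holds a
# single head class, and every group g >= 1 holds 2**(g-1) classes with
# 2**(m-g) samples each, so each tail group contributes the same 2**(m-1)
# samples in total while per-class counts decay geometrically. A standalone
# re-derivation for a small exponent (the runs use m=15, giving groups 0..15):
def _powerlaw_counts_sketch(m: int):
    counts, groups = {}, []
    class_id = 0
    for group_id in range(m + 1):
        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
        samples_per_class = 2 ** (m - group_id)   # halves with each group
        for _ in range(num_classes):
            counts[class_id] = samples_per_class
            groups.append(group_id)
            class_id += 1
    return counts, groups

assert _powerlaw_counts_sketch(3) == (
    {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1},
    [0, 1, 2, 2, 3, 3, 3, 3],
)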
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
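# --- Illustrative aside (sketch; not part of the logged script) ---
# The fixed_indices argument consumed above is the parsed
# fixed_eval_indices.json that this diff adds to every run directory: a map
# from group id (as a string) to per_group_k = 100 line numbers of the QA
# jsonl, and 16 groups x 100 lines gives the "Fixed-eval set loaded with 1600
# samples" reported in the log. A minimal sketch of applying that filter in
# one streaming pass (the function name is hypothetical):
import json

def load_fixed_eval_set(indices_path, jsonl_path):
    with open(indices_path, "r", encoding="utf-8") as f:
        fixed_indices = json.load(f)    # e.g. {"0": [1390189, ...], "1": [...]}
    needed = set()
    for line_ids in fixed_indices.values():
        needed.update(line_ids)         # union of all groups' line numbers
    samples = []
    with open(jsonl_path, "r", encoding="utf-8") as f:
        for idx, line in enumerate(f):  # single pass over the large jsonl
            if idx in needed:
                samples.append(json.loads(line))
    return samples

# e.g. load_fixed_eval_set("fixed_eval_indices.json", "qa_tail_m15.jsonl")
# yields 1600 samples for 16 groups x per_group_k == 100 distinct indices.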
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
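# --- Illustrative aside (sketch; not part of the logged script) ---
# The FTA figures in this log are first-token accuracy, computed exactly as in
# the loop above: a sample counts as correct when the argmax logit at the last
# prompt position equals the first token of " <answer>". A condensed sketch;
# `tokenizer` is assumed to be the GPT2Tokenizer used elsewhere in the run and
# `logits` a (seq_len, vocab_size) tensor for the padded input sequence:
import re
import torch

def first_token_accuracy(text, logits, tokenizer):
    match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', text, re.IGNORECASE)
    if not match:
        return None                      # unparsable sample: skipped
    prompt, answer = (s.strip() for s in match.groups())
    if not answer:
        return None
    try:
        expected = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
    except IndexError:
        return None                      # answer encodes to nothing: skipped
    prompt_len = len(tokenizer.encode(prompt, add_special_tokens=False))
    if not (0 < prompt_len <= logits.size(0)):
        return None                      # prompt falls outside the padded input
    predicted = int(torch.argmax(logits[prompt_len - 1]))  # next-token argmax
    return predicted == expected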
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
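# --- Illustrative aside (kept entirely in comments so the elif chain above
# and below stays intact): every branch here is meant to split one fixed pool
# of matrices between Muon and Adam, with head, embed and scalar parameters
# always routed to Adam separately further below. A hypothetical sanity check
# one could run once the chain has resolved:
#
#     matrix_pool = all_attn_matrices + all_mlp_matrices
#     muon_ids = {id(p) for p in muon_params_target_list}
#     adam_ids = {id(p) for p in adam_matrix_target_list}
#     assert not (muon_ids & adam_ids), "a matrix was given to both optimizers"
#     assert muon_ids | adam_ids == {id(p) for p in matrix_pool}, \
#         "a matrix was assigned to neither optimizer"
#
# The partition property holds for every mode here except mode 9, which hands
# all parameters to SGD and never builds the two target lists.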
elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) #add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_qk_group
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_attn_matrices
+ adam_matrix_target_list = all_mlp_matrices
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_mlp_matrices
+ adam_matrix_target_list = all_attn_matrices
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = []
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = mlp_w2_group
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
+ adam_matrix_target_list = attn_qk_group
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
+ elif current_optimizer_mode == 9: # sgd + momentum
+ # This mode uses SGD with momentum for all parameters, no Muon or Adam
+ print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+ all_params = list(model.parameters())
+ sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+ optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+ optimizer2 = None
+ optimizers = [optimizer1]
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + all_mlp_matrices
+ adam_matrix_target_list = attn_v_params + attn_qk_group
+ elif current_optimizer_mode == 13:
+ print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+ elif current_optimizer_mode == 14:
+ print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params
+ adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+ elif current_optimizer_mode == 15:
+ print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params
+ adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+ elif current_optimizer_mode == 16:
+ print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params + attn_qk_group
+ adam_matrix_target_list = attn_o_params + all_mlp_matrices
+ else:
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
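+ # (Editor's sketch, not part of the logged run.) Complementary invariant to the
+ # overlap check in the qkvo branch: outside SGD mode 9, every hidden matrix
+ # should be claimed by exactly one optimizer. Only the group lists built above
+ # are assumed:
+ if current_optimizer_mode != 9:
+     _claimed = [id(p) for p in muon_params_target_list] + [id(p) for p in adam_matrix_target_list]
+     _all_hidden = {id(p) for p in all_attn_matrices + all_mlp_matrices}
+     assert len(_claimed) == len(set(_claimed)), "a matrix was routed to both Muon and Adam"
+     assert set(_claimed) == _all_hidden, "some hidden matrix is not assigned to any optimizer"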
+
+ # Skip Adam and Muon setup for SGD mode (9)
+ if current_optimizer_mode != 9:
+ # Adam optimizer setup
+ adam_param_groups_config = [
+ #dict(params=head_params, lr=0.22),
+ #dict(params=embed_params, lr=0.6),
+ #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+ dict(params=head_params, lr=exp_args.adam_lr),
+ dict(params=embed_params, lr=exp_args.adam_lr),
+ dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+ ]
+ # Add matrices specifically assigned to Adam for this experiment mode
+ if adam_matrix_target_list:
+ # Ensure adam_matrix_target_list is flat and contains Parameters
+ flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+ if flat_adam_matrices: # Only add group if there are params
+ adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+ # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+ adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+ optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # note: weight_decay=0.01 could be added to Adam here
+ optimizers = [optimizer1] # Start with Adam
+
+ # Muon optimizer setup
+ if muon_params_target_list:
+ # Ensure muon_params_target_list is flat, unique, and contains Parameters
+ flat_unique_muon_params = []
+ seen_muon_ids = set()
+ for sublist_or_p in muon_params_target_list:
+ for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+ if p is not None and id(p) not in seen_muon_ids:
+ flat_unique_muon_params.append(p)
+ seen_muon_ids.add(id(p))
+
+ if flat_unique_muon_params: # Only create Muon if it has parameters
+ optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # read muon_lr from CLI args; the gated branch never defines a local muon_lr
+ optimizers.append(optimizer2)
+ else:
+ print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+ optimizer2 = None # Explicitly set to None if not created
+ else:
+ print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+ optimizer2 = None # Explicitly set to None
+
+ print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True)
+ if optimizer2:
+ print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+ # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+ hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+ embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+ scalar_params = [p for p in model.parameters() if p.ndim < 2]
+ head_params = [model.lm_head.weight]
+
+ # init the optimizer(s)
+ adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+ # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+ # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+ optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+ optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+ optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+ for group in opt.param_groups:
+ group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+ x = step / args.num_iterations # progress in training
+ # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+ # --- MODIFICATION: Adjust assert for LR schedule ---
+ if not (0 <= x <= 1): # Allow x=1 for the last step
+ x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+ # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+ if x < 1 - args.cooldown_frac:
+ return 1.0
+ else:
+ # Ensure cooldown_frac is not zero to avoid division by zero
+ w = (1 - x) / max(args.cooldown_frac, 1e-9)
+ return w * 1.0 + (1 - w) * 0.1
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+ return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+ return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+ x = step / args.num_iterations # progress in training
+ # --- MODIFICATION: Adjust assert for window size schedule ---
+ if not (0 <= x <= 1):
+ x = min(max(x, 0.0), 1.0) # Clamp x
+
+ # Ensure window_size is at least 128
+ window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+ return get_window_size_blocks_helper(window_size)
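+# (Editor's sketch, not part of the logged run.) Shape of the schedule above,
+# with the configured num_iterations=10000 and cooldown_frac=0.8: the LR
+# multiplier stays at 1.0 for the first 20% of steps, then decays linearly
+# to 0.1x at the final step:
+for _s, _expected in [(0, 1.0), (2000, 1.0), (6000, 0.55), (10000, 0.1)]:
+    assert abs(get_lr(_s) - _expected) < 1e-9, (_s, get_lr(_s))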
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile 'model' here; the compiled handle only exists as 'model_compiled' after this call
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+ model=copy.deepcopy(model_compiled.state_dict()),
+ optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+ inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+ loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+ loss.backward()
+ for param in model_compiled.parameters():
+ if param.grad is not None:
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+ # Add gradient clipping for SGD mode in warmup too
+ if exp_args.optimizer_mode == 9:
+ torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+ for opt in optimizers:
+ opt.step()
+ model_compiled.zero_grad(set_to_none=True)
+ model_compiled.load_state_dict(initial_state["model"])
+ for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+ opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+ tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+ history = {
+ 'per_class_loss': defaultdict(dict),
+ 'per_class_acc': defaultdict(dict),
+ 'total_loss': {},
+ 'total_acc': {}
+ }
+
+ # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+ FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+ #PER_GROUP_K = 100 # Number of samples per group
+
+ def _is_valid_qa_text_for_fta(text: str) -> bool:
+ # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:"
+ if not isinstance(text, str):
+ return False
+ return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+ def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+ rng = random.Random(seed)
+ # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+ buckets = defaultdict(list) # gid -> [line_idx, ...]
+ with open(jsonl_path, "r", encoding="utf-8") as f:
+ for i, line in enumerate(f):
+ try:
+ item = json.loads(line)
+ except Exception:
+ continue
+ gid = class_to_group_map.get(item.get("class_id"))
+ if gid is None:
+ continue
+ if not _is_valid_qa_text_for_fta(item.get("text", "")):
+ continue
+ buckets[gid].append(i)
+
+ fixed = {}
+ for gid, arr in buckets.items():
+ if len(arr) <= per_group_k:
+ fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+ else:
+ fixed[str(gid)] = rng.sample(arr, per_group_k)
+ return fixed
+
+ # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+ selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+ class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+ if not FIXED_VAL_INDEX_PATH.exists():
+ fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+ with open(FIXED_VAL_INDEX_PATH, "w") as f:
+ json.dump(fixed_idx, f)
+ print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 14:43:50] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message 
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_qk_group
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_attn_matrices
+ adam_matrix_target_list = all_mlp_matrices
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_mlp_matrices
+ adam_matrix_target_list = all_attn_matrices
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = []
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = mlp_w2_group
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
+ adam_matrix_target_list = attn_qk_group
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
+ elif current_optimizer_mode == 9: # sgd + momentum
+ # This mode uses SGD with momentum for all parameters, no Muon or Adam
+ print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+ all_params = list(model.parameters())
+ sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+ optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+ optimizer2 = None
+ optimizers = [optimizer1]
+ print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + all_mlp_matrices
+ adam_matrix_target_list = attn_v_params + attn_qk_group
+ elif current_optimizer_mode == 13:
+ print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+ elif current_optimizer_mode == 14:
+ print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params
+ adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+ elif current_optimizer_mode == 15:
+ print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params
+ adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+ elif current_optimizer_mode == 16:
+ print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params + attn_qk_group
+ adam_matrix_target_list = attn_o_params + all_mlp_matrices
+ else:
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
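# Editor's aside, not part of the diff: the if/elif ladder above routes each weight
# matrix to either Muon or Adam. A minimal sketch of the same routing as a lookup
# table, assuming the group lists defined above (attn_qk_group, attn_vo_group,
# attn_v_params, attn_o_params, mlp_w1_group, mlp_w2_group, all_attn_matrices,
# all_mlp_matrices); mode 9 (pure SGD) would stay special-cased:
#
#     mode_to_groups = {  # mode id -> (muon matrices, adam matrices)
#         0:  (all_attn_matrices + all_mlp_matrices, []),
#         1:  (attn_qk_group, attn_vo_group + all_mlp_matrices),
#         2:  (attn_vo_group, attn_qk_group + all_mlp_matrices),
#         3:  (all_attn_matrices, all_mlp_matrices),
#         4:  (all_mlp_matrices, all_attn_matrices),
#         5:  ([], all_attn_matrices + all_mlp_matrices),
#         6:  (mlp_w2_group, all_attn_matrices + mlp_w1_group),
#         7:  (attn_vo_group + all_mlp_matrices, attn_qk_group),
#         8:  (attn_vo_group + mlp_w2_group, attn_qk_group + mlp_w1_group),
#         10: (attn_o_params + all_mlp_matrices, attn_v_params + attn_qk_group),
#         13: (attn_o_params + mlp_w2_group, attn_qk_group + attn_v_params + mlp_w1_group),
#         14: (attn_o_params, attn_qk_group + attn_v_params + all_mlp_matrices),
#         15: (attn_v_params, attn_qk_group + attn_o_params + all_mlp_matrices),
#         16: (attn_v_params + attn_qk_group, attn_o_params + all_mlp_matrices),
#     }
#     muon_params_target_list, adam_matrix_target_list = mode_to_groups[current_optimizer_mode]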
+elif exp_args.model_parameterization == "gated": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >= 2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach() / args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() # per-token loss (not included in the console line below) + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group()
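The get_lr schedule above holds the multiplier at 1.0 for the first (1 - cooldown_frac) of training and then decays linearly toward 0.1. A minimal standalone sketch of its shape, assuming num_iterations=10000 (as in the step counts logged below) and cooldown_frac=0.8 (per this run's config); the returned value is the factor applied to each group's initial_lr:

def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped progress, as in get_lr
    if x < 1 - cooldown_frac:
        return 1.0  # stable phase
    w = (1 - x) / max(cooldown_frac, 1e-9)  # 1.0 at cooldown start, 0.0 at the end
    return w * 1.0 + (1 - w) * 0.1  # linear decay from 1.0 to 0.1

for s in (0, 2000, 6000, 10000):
    print(s, lr_multiplier(s))  # -> 1.0, 1.0, 0.55, 0.1 (up to float rounding)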
+[2025-09-05 14:43:50] [Rank 0] PRINT: Constructing model...
+[2025-09-05 14:43:52] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 14:43:52] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 14:43:52] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 14:43:56] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 14:43:56] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 14:43:56] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 14:43:56] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 14:43:56] [Rank 0] PRINT: Model returns:
+[2025-09-05 14:43:56] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 14:43:56] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 14:43:56] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.01).
+[2025-09-05 14:43:56] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 14:43:56] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 14:43:56] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 14:44:00] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 14:44:00] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 14:45:55] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 14:45:55] [Rank 0] PRINT: Starting training...
+[2025-09-05 14:46:01] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/fixed_eval_indices.json
+[2025-09-05 14:46:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
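The divisibility warning above is benign but means part of the validation set is never scored: val_batch_size is world_size * val_seq_len = 65536 (which implies world_size = 4 given val_seq_len = 16384), and the validation loop runs val_tokens // val_batch_size full steps. A quick check of the arithmetic, using the values from the log:

val_tokens, val_batch_size = 491520, 65536
full_steps, leftover = divmod(val_tokens, val_batch_size)
print(full_steps, leftover)  # 7 32768 -> 7 validation steps run; 32768 tokens are skipped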
+[2025-09-05 14:46:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 14:47:12] [Rank 0] step:21/10000 train_time:39356ms step_avg:1874.09ms
+[2025-09-05 14:47:13] [Rank 0] step:41/10000 train_time:40003ms step_avg:975.67ms
+[2025-09-05 14:47:14] [Rank 0] step:61/10000 train_time:40649ms step_avg:666.38ms
+[2025-09-05 14:47:14] [Rank 0] step:81/10000 train_time:41296ms step_avg:509.83ms
+[2025-09-05 14:47:15] [Rank 0] step:101/10000 train_time:41943ms step_avg:415.27ms
+[2025-09-05 14:47:16] [Rank 0] step:121/10000 train_time:42589ms step_avg:351.98ms
+[2025-09-05 14:47:16] [Rank 0] step:141/10000 train_time:43235ms step_avg:306.63ms
+[2025-09-05 14:47:17] [Rank 0] step:161/10000 train_time:43883ms step_avg:272.56ms
+[2025-09-05 14:47:18] [Rank 0] step:181/10000 train_time:44530ms step_avg:246.02ms
+[2025-09-05 14:47:18] [Rank 0] step:201/10000 train_time:45177ms step_avg:224.76ms
+[2025-09-05 14:47:19] [Rank 0] step:221/10000 train_time:45824ms step_avg:207.35ms
+[2025-09-05 14:47:20] [Rank 0] step:241/10000 train_time:46471ms step_avg:192.83ms
+[2025-09-05 14:47:20] [Rank 0] step:261/10000 train_time:47119ms step_avg:180.53ms
+[2025-09-05 14:47:21] [Rank 0] step:281/10000 train_time:47766ms step_avg:169.99ms
+[2025-09-05 14:47:21] [Rank 0] step:301/10000 train_time:48413ms step_avg:160.84ms
+[2025-09-05 14:47:22] [Rank 0] step:321/10000 train_time:49061ms step_avg:152.84ms
+[2025-09-05 14:47:23] [Rank 0] step:341/10000 train_time:49709ms step_avg:145.77ms
+[2025-09-05 14:47:23] [Rank 0] step:361/10000 train_time:50355ms step_avg:139.49ms
+[2025-09-05 14:47:24] [Rank 0] step:381/10000 train_time:51002ms step_avg:133.86ms
+[2025-09-05 14:47:25] [Rank 0] step:401/10000 train_time:51649ms step_avg:128.80ms
+[2025-09-05 14:47:25] [Rank 0] step:421/10000 train_time:52296ms step_avg:124.22ms
+[2025-09-05 14:47:26] [Rank 0] step:441/10000 train_time:52942ms step_avg:120.05ms
+[2025-09-05 14:47:27] [Rank 0] step:461/10000 train_time:53589ms step_avg:116.24ms
+[2025-09-05 14:47:27] [Rank 0] step:481/10000 train_time:54235ms step_avg:112.75ms
+[2025-09-05 14:47:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:47:28] [Rank 0] PRINT: step:500/10000 train_loss:2.9261 val_loss:1.2345 train_time:55112ms step_avg:110.22ms
+[2025-09-05 14:47:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:47:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:48:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:48:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:48:50] [Rank 0] Total Loss: 4.8666
+[2025-09-05 14:48:50] [Rank 0] Total FTA (Unweighted): 0.3831
+[2025-09-05 14:48:50] [Rank 0] Total FTA (Weighted): 0.3831
+[2025-09-05 14:48:50] [Rank 0] Group 0 Loss: 4.4033
+[2025-09-05 14:48:50] [Rank 0] Group 1 Loss: 4.0609
+[2025-09-05 14:48:50] [Rank 0] Group 2 Loss: 3.9388
+[2025-09-05 14:48:50] [Rank 0] Group 3 Loss: 4.3880
+[2025-09-05 14:48:50] [Rank 0] Group 4 Loss: 4.3976
+[2025-09-05 14:48:50] [Rank 0] Group 5 Loss: 4.5605
+[2025-09-05 14:48:50] [Rank 0] Group 6 Loss: 4.5041
+[2025-09-05 14:48:50] [Rank 0] Group 7 Loss: 4.7221
+[2025-09-05 14:48:50] [Rank 0] Group 8 Loss: 5.0353
+[2025-09-05 14:48:50] [Rank 0] Group 9 Loss: 5.1856
+[2025-09-05 14:48:50] [Rank 0] Group 10 Loss: 5.4232
+[2025-09-05 14:48:50] [Rank 0] Group 11 Loss: 5.5181
+[2025-09-05 14:48:50] [Rank 0] Group 12 Loss: 5.4187
+[2025-09-05 14:48:50] [Rank 0] Group 13 Loss: 5.4552
+[2025-09-05 14:48:50] [Rank 0] Group 14 Loss: 5.4671
+[2025-09-05 14:48:50] [Rank 0] Group 15 Loss: 5.3868
+[2025-09-05 14:48:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:48:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:48:50] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:48:50] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:48:50] [Rank 0] Group 4 FTA: 0.8600
+[2025-09-05 14:48:50] [Rank 0] Group 5 FTA: 0.4200
+[2025-09-05 14:48:50] [Rank 0] Group 6 FTA: 0.1900
+[2025-09-05 14:48:50] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 14:48:50] [Rank 0] Group 8 FTA: 0.1000
+[2025-09-05 14:48:50] [Rank 0] Group 9 FTA: 0.0500
+[2025-09-05 14:48:50] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-05 14:48:50] [Rank 0] Group 11 FTA: 0.0200
+[2025-09-05 14:48:50] [Rank 0] Group 12 FTA: 0.0600
+[2025-09-05 14:48:50] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 14:48:50] [Rank 0] Group 14 FTA: 0.0700
+[2025-09-05 14:48:50] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 14:48:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 14:48:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 14:48:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 14:48:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 14:48:51] [Rank 0] step:501/10000 train_time:55120ms step_avg:110.02ms
+[2025-09-05 14:48:52] [Rank 0] step:521/10000 train_time:55547ms step_avg:106.62ms
+[2025-09-05 14:48:53] [Rank 0] step:541/10000 train_time:56195ms step_avg:103.87ms
+[2025-09-05 14:48:53] [Rank 0] step:561/10000 train_time:56843ms step_avg:101.32ms
+[2025-09-05 14:48:54] [Rank 0] step:581/10000 train_time:57492ms step_avg:98.95ms
+[2025-09-05 14:48:55] [Rank 0] step:601/10000 train_time:58140ms step_avg:96.74ms
+[2025-09-05 14:48:55] [Rank 0] step:621/10000 train_time:58788ms step_avg:94.67ms
+[2025-09-05 14:48:56] [Rank 0] step:641/10000 train_time:59436ms step_avg:92.72ms
+[2025-09-05 14:48:56] [Rank 0] step:661/10000 train_time:60085ms step_avg:90.90ms
+[2025-09-05 14:48:57] [Rank 0] step:681/10000 train_time:60733ms step_avg:89.18ms
+[2025-09-05 14:48:58] [Rank 0] step:701/10000 train_time:61381ms step_avg:87.56ms
+[2025-09-05 14:48:58] [Rank 0] step:721/10000 train_time:62029ms step_avg:86.03ms
+[2025-09-05 14:48:59] [Rank 0] step:741/10000 train_time:62677ms step_avg:84.58ms
+[2025-09-05 14:49:00] [Rank 0] step:761/10000 train_time:63328ms step_avg:83.22ms
+[2025-09-05 14:49:00] [Rank 0] step:781/10000 train_time:63981ms step_avg:81.92ms
+[2025-09-05 14:49:01] [Rank 0] step:801/10000 train_time:64635ms step_avg:80.69ms
+[2025-09-05 14:49:02] [Rank 0] step:821/10000 train_time:65290ms step_avg:79.53ms
+[2025-09-05 14:49:03] [Rank 0] step:841/10000 train_time:66567ms step_avg:79.15ms
+[2025-09-05 14:49:04] [Rank 0] step:861/10000 train_time:67220ms step_avg:78.07ms
+[2025-09-05 14:49:04] [Rank 0] step:881/10000 train_time:67875ms step_avg:77.04ms
+[2025-09-05 14:49:05] [Rank 0] step:901/10000 train_time:68528ms step_avg:76.06ms
+[2025-09-05 14:49:06] [Rank 0] step:921/10000 train_time:69373ms step_avg:75.32ms
+[2025-09-05 14:49:06] [Rank 0] step:941/10000 train_time:70026ms step_avg:74.42ms
+[2025-09-05 14:49:07] [Rank 0] step:961/10000 train_time:70679ms step_avg:73.55ms
+[2025-09-05 14:49:08] [Rank 0] step:981/10000 train_time:71334ms step_avg:72.72ms
+[2025-09-05 14:49:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:49:09] [Rank 0] PRINT: step:1000/10000 train_loss:1.0617 val_loss:0.9402 train_time:72219ms step_avg:72.22ms
+[2025-09-05 14:49:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:49:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:50:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:50:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:50:30] [Rank 0] Total Loss: 5.1656
+[2025-09-05 14:50:30] [Rank 0] Total FTA (Unweighted): 0.5844
+[2025-09-05 14:50:30] [Rank 0] Total FTA (Weighted): 0.5844
+[2025-09-05 14:50:30] [Rank 0] Group 0 Loss: 4.9407
+[2025-09-05 14:50:30] [Rank 0] Group 1 Loss: 4.5035
+[2025-09-05 14:50:30] [Rank 0] Group 2 Loss: 4.5127
+[2025-09-05 14:50:30] [Rank 0] Group 3 Loss: 4.8990
+[2025-09-05 14:50:30] [Rank 0] Group 4 Loss: 4.7947
+[2025-09-05 14:50:30] [Rank 0] Group 5 Loss: 4.8618
+[2025-09-05 14:50:30] [Rank 0] Group 6 Loss: 4.8357
+[2025-09-05 14:50:30] [Rank 0] Group 7 Loss: 4.9278
+[2025-09-05 14:50:30] [Rank 0] Group 8 Loss: 5.1030
+[2025-09-05 14:50:30] [Rank 0] Group 9 Loss: 5.1881
+[2025-09-05 14:50:30] [Rank 0] Group 10 Loss: 5.3880
+[2025-09-05 14:50:30] [Rank 0] Group 11 Loss: 5.5903
+[2025-09-05 14:50:30] [Rank 0] Group 12 Loss: 5.6376
+[2025-09-05 14:50:30] [Rank 0] Group 13 Loss: 5.8451
+[2025-09-05 14:50:30] [Rank 0] Group 14 Loss: 5.8302
+[2025-09-05 14:50:30] [Rank 0] Group 15 Loss: 5.7909
+[2025-09-05 14:50:30] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:50:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:50:30] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:50:30] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:50:30] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 14:50:30] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 14:50:30] [Rank 0] Group 6 FTA: 0.9800
+[2025-09-05 14:50:30] [Rank 0] Group 7 FTA: 0.9500
+[2025-09-05 14:50:30] [Rank 0] Group 8 FTA: 0.5900
+[2025-09-05 14:50:30] [Rank 0] Group 9 FTA: 0.1800
+[2025-09-05 14:50:30] [Rank 0] Group 10 FTA: 0.0400
+[2025-09-05 14:50:30] [Rank 0] Group 11 FTA: 0.0600
+[2025-09-05 14:50:30] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 14:50:30] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 14:50:30] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 14:50:30] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 14:50:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 14:50:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 14:50:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 14:50:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 14:50:32] [Rank 0] step:1001/10000 train_time:72227ms step_avg:72.16ms
+[2025-09-05 14:50:32] [Rank 0] step:1021/10000 train_time:72655ms step_avg:71.16ms
+[2025-09-05 14:50:33] [Rank 0] step:1041/10000 train_time:73307ms step_avg:70.42ms
+[2025-09-05 14:50:34] [Rank 0] step:1061/10000 train_time:73959ms step_avg:69.71ms
+[2025-09-05 14:50:34] [Rank 0] step:1081/10000 train_time:74612ms step_avg:69.02ms
+[2025-09-05 14:50:35] [Rank 0] step:1101/10000 train_time:75265ms step_avg:68.36ms
+[2025-09-05 14:50:36] [Rank 0] step:1121/10000 train_time:75915ms step_avg:67.72ms
+[2025-09-05 14:50:36] [Rank 0] step:1141/10000 train_time:76568ms step_avg:67.11ms
+[2025-09-05 14:50:37] [Rank 0] step:1161/10000 train_time:77220ms step_avg:66.51ms
+[2025-09-05 14:50:37] [Rank 0] step:1181/10000 train_time:77872ms step_avg:65.94ms
+[2025-09-05 14:50:38] [Rank 0] step:1201/10000 train_time:78525ms step_avg:65.38ms
+[2025-09-05 14:50:39] [Rank 0] step:1221/10000 train_time:79178ms step_avg:64.85ms
+[2025-09-05 14:50:39] [Rank 0] step:1241/10000 train_time:79829ms step_avg:64.33ms
+[2025-09-05 14:50:40] [Rank 0] step:1261/10000 train_time:80482ms step_avg:63.82ms
+[2025-09-05 14:50:41] [Rank 0] step:1281/10000 train_time:81134ms step_avg:63.34ms
+[2025-09-05 14:50:41] [Rank 0] step:1301/10000 train_time:81787ms step_avg:62.86ms
+[2025-09-05 14:50:42] [Rank 0] step:1321/10000 train_time:82440ms step_avg:62.41ms
+[2025-09-05 14:50:43] [Rank 0] step:1341/10000 train_time:83092ms step_avg:61.96ms
+[2025-09-05 14:50:43] [Rank 0] step:1361/10000 train_time:83745ms step_avg:61.53ms
+[2025-09-05 14:50:44] [Rank 0] step:1381/10000 train_time:84398ms step_avg:61.11ms
+[2025-09-05 14:50:45] [Rank 0] step:1401/10000 train_time:85051ms step_avg:60.71ms
+[2025-09-05 14:50:45] [Rank 0] step:1421/10000 train_time:85703ms step_avg:60.31ms
+[2025-09-05 14:50:46] [Rank 0] step:1441/10000 train_time:86355ms step_avg:59.93ms
+[2025-09-05 14:50:47] [Rank 0] step:1461/10000 train_time:87007ms step_avg:59.55ms
+[2025-09-05 14:50:47] [Rank 0] step:1481/10000 train_time:87660ms step_avg:59.19ms
+[2025-09-05 14:50:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:50:48] [Rank 0] PRINT: step:1500/10000 train_loss:0.8942 val_loss:0.8456 train_time:88545ms step_avg:59.03ms
+[2025-09-05 14:50:48] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:50:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:52:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:52:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:52:10] [Rank 0] Total Loss: 5.3268
+[2025-09-05 14:52:10] [Rank 0] Total FTA (Unweighted): 0.6575
+[2025-09-05 14:52:10] [Rank 0] Total FTA (Weighted): 0.6575
+[2025-09-05 14:52:10] [Rank 0] Group 0 Loss: 5.1619
+[2025-09-05 14:52:10] [Rank 0] Group 1 Loss: 4.8903
+[2025-09-05 14:52:10] [Rank 0] Group 2 Loss: 4.6858
+[2025-09-05 14:52:10] [Rank 0] Group 3 Loss: 5.1217
+[2025-09-05 14:52:10] [Rank 0] Group 4 Loss: 5.0814
+[2025-09-05 14:52:10] [Rank 0] Group 5 Loss: 5.1359
+[2025-09-05 14:52:10] [Rank 0] Group 6 Loss: 5.0296
+[2025-09-05 14:52:10] [Rank 0] Group 7 Loss: 5.1205
+[2025-09-05 14:52:10] [Rank 0] Group 8 Loss: 5.3087
+[2025-09-05 14:52:10] [Rank 0] Group 9 Loss: 5.2808
+[2025-09-05 14:52:10] [Rank 0] Group 10 Loss: 5.4569
+[2025-09-05 14:52:10] [Rank 0] Group 11 Loss: 5.5835
+[2025-09-05 14:52:10] [Rank 0] Group 12 Loss: 5.5756
+[2025-09-05 14:52:10] [Rank 0] Group 13 Loss: 5.8666
+[2025-09-05 14:52:10] [Rank 0] Group 14 Loss: 5.9434
+[2025-09-05 14:52:10] [Rank 0] Group 15 Loss: 5.9857
0 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 14:52:10] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 14:52:10] [Rank 0] Group 7 FTA: 0.9900 +[2025-09-05 14:52:10] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 14:52:10] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 14:52:10] [Rank 0] Group 9 FTA: 0.7100 +[2025-09-05 14:52:10] [Rank 0] Group 9 FTA: 0.7100 +[2025-09-05 14:52:10] [Rank 0] Group 10 FTA: 0.2400 +[2025-09-05 14:52:10] [Rank 0] Group 10 FTA: 0.2400 +[2025-09-05 14:52:10] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-05 14:52:10] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-05 14:52:10] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 14:52:10] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 14:52:10] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 14:52:10] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 14:52:10] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 14:52:10] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 14:52:10] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 14:52:10] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 14:52:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png +[2025-09-05 14:52:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png +[2025-09-05 14:52:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png +[2025-09-05 14:52:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png +[2025-09-05 14:52:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png +[2025-09-05 14:52:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png +[2025-09-05 14:52:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png +[2025-09-05 14:52:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png +[2025-09-05 14:52:12] [Rank 0] step:1501/10000 train_time:88554ms step_avg:59.00ms +[2025-09-05 14:52:12] [Rank 0] step:1501/10000 train_time:88554ms step_avg:59.00ms +[2025-09-05 14:52:12] [Rank 0] step:1521/10000 train_time:88996ms step_avg:58.51ms +[2025-09-05 14:52:12] [Rank 0] step:1521/10000 train_time:88996ms step_avg:58.51ms +[2025-09-05 14:52:13] [Rank 0] step:1541/10000 train_time:89849ms step_avg:58.31ms +[2025-09-05 14:52:13] [Rank 0] step:1541/10000 train_time:89849ms step_avg:58.31ms +[2025-09-05 14:52:14] [Rank 
0] step:1561/10000 train_time:90502ms step_avg:57.98ms +[2025-09-05 14:52:14] [Rank 0] step:1561/10000 train_time:90502ms step_avg:57.98ms +[2025-09-05 14:52:14] [Rank 0] step:1581/10000 train_time:91155ms step_avg:57.66ms +[2025-09-05 14:52:14] [Rank 0] step:1581/10000 train_time:91155ms step_avg:57.66ms +[2025-09-05 14:52:15] [Rank 0] step:1601/10000 train_time:91938ms step_avg:57.43ms +[2025-09-05 14:52:15] [Rank 0] step:1601/10000 train_time:91938ms step_avg:57.43ms +[2025-09-05 14:52:16] [Rank 0] step:1621/10000 train_time:92591ms step_avg:57.12ms +[2025-09-05 14:52:16] [Rank 0] step:1621/10000 train_time:92591ms step_avg:57.12ms +[2025-09-05 14:52:17] [Rank 0] step:1641/10000 train_time:93246ms step_avg:56.82ms +[2025-09-05 14:52:17] [Rank 0] step:1641/10000 train_time:93246ms step_avg:56.82ms +[2025-09-05 14:52:17] [Rank 0] step:1661/10000 train_time:93899ms step_avg:56.53ms +[2025-09-05 14:52:17] [Rank 0] step:1661/10000 train_time:93899ms step_avg:56.53ms +[2025-09-05 14:52:18] [Rank 0] step:1681/10000 train_time:94553ms step_avg:56.25ms +[2025-09-05 14:52:18] [Rank 0] step:1681/10000 train_time:94553ms step_avg:56.25ms +[2025-09-05 14:52:18] [Rank 0] step:1701/10000 train_time:95207ms step_avg:55.97ms +[2025-09-05 14:52:18] [Rank 0] step:1701/10000 train_time:95207ms step_avg:55.97ms +[2025-09-05 14:52:19] [Rank 0] step:1721/10000 train_time:95860ms step_avg:55.70ms +[2025-09-05 14:52:19] [Rank 0] step:1721/10000 train_time:95860ms step_avg:55.70ms +[2025-09-05 14:52:20] [Rank 0] step:1741/10000 train_time:96514ms step_avg:55.44ms +[2025-09-05 14:52:20] [Rank 0] step:1741/10000 train_time:96514ms step_avg:55.44ms +[2025-09-05 14:52:20] [Rank 0] step:1761/10000 train_time:97168ms step_avg:55.18ms +[2025-09-05 14:52:20] [Rank 0] step:1761/10000 train_time:97168ms step_avg:55.18ms +[2025-09-05 14:52:21] [Rank 0] step:1781/10000 train_time:97822ms step_avg:54.93ms +[2025-09-05 14:52:21] [Rank 0] step:1781/10000 train_time:97822ms step_avg:54.93ms +[2025-09-05 14:52:22] [Rank 0] step:1801/10000 train_time:98475ms step_avg:54.68ms +[2025-09-05 14:52:22] [Rank 0] step:1801/10000 train_time:98475ms step_avg:54.68ms +[2025-09-05 14:52:22] [Rank 0] step:1821/10000 train_time:99129ms step_avg:54.44ms +[2025-09-05 14:52:22] [Rank 0] step:1821/10000 train_time:99129ms step_avg:54.44ms +[2025-09-05 14:52:23] [Rank 0] step:1841/10000 train_time:99782ms step_avg:54.20ms +[2025-09-05 14:52:23] [Rank 0] step:1841/10000 train_time:99782ms step_avg:54.20ms +[2025-09-05 14:52:24] [Rank 0] step:1861/10000 train_time:100436ms step_avg:53.97ms +[2025-09-05 14:52:24] [Rank 0] step:1861/10000 train_time:100436ms step_avg:53.97ms +[2025-09-05 14:52:24] [Rank 0] step:1881/10000 train_time:101090ms step_avg:53.74ms +[2025-09-05 14:52:24] [Rank 0] step:1881/10000 train_time:101090ms step_avg:53.74ms +[2025-09-05 14:52:25] [Rank 0] step:1901/10000 train_time:101744ms step_avg:53.52ms +[2025-09-05 14:52:25] [Rank 0] step:1901/10000 train_time:101744ms step_avg:53.52ms +[2025-09-05 14:52:26] [Rank 0] step:1921/10000 train_time:102398ms step_avg:53.30ms +[2025-09-05 14:52:26] [Rank 0] step:1921/10000 train_time:102398ms step_avg:53.30ms +[2025-09-05 14:52:26] [Rank 0] step:1941/10000 train_time:103052ms step_avg:53.09ms +[2025-09-05 14:52:26] [Rank 0] step:1941/10000 train_time:103052ms step_avg:53.09ms +[2025-09-05 14:52:27] [Rank 0] step:1961/10000 train_time:103705ms step_avg:52.88ms +[2025-09-05 14:52:27] [Rank 0] step:1961/10000 train_time:103705ms step_avg:52.88ms +[2025-09-05 14:52:28] [Rank 0] 
step:1981/10000 train_time:104359ms step_avg:52.68ms +[2025-09-05 14:52:28] [Rank 0] step:1981/10000 train_time:104359ms step_avg:52.68ms +[2025-09-05 14:52:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 14:52:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 14:52:29] [Rank 0] PRINT: step:2000/10000 train_loss:0.8283 val_loss:0.7953 train_time:105247ms step_avg:52.62ms +[2025-09-05 14:52:29] [Rank 0] PRINT: step:2000/10000 train_loss:0.8283 val_loss:0.7953 train_time:105247ms step_avg:52.62ms +[2025-09-05 14:52:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 14:52:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 14:52:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 14:52:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 14:53:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 14:53:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 14:53:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 14:53:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 14:53:50] [Rank 0] Total Loss: 5.4240 +[2025-09-05 14:53:50] [Rank 0] Total Loss: 5.4240 +[2025-09-05 14:53:50] [Rank 0] Total FTA (Unweighted): 0.7050 +[2025-09-05 14:53:50] [Rank 0] Total FTA (Unweighted): 0.7050 +[2025-09-05 14:53:50] [Rank 0] Total FTA (Weighted): 0.7050 +[2025-09-05 14:53:50] [Rank 0] Total FTA (Weighted): 0.7050 +[2025-09-05 14:53:50] [Rank 0] Group 0 Loss: 5.4627 +[2025-09-05 14:53:50] [Rank 0] Group 0 Loss: 5.4627 +[2025-09-05 14:53:50] [Rank 0] Group 1 Loss: 5.0940 +[2025-09-05 14:53:50] [Rank 0] Group 1 Loss: 5.0940 +[2025-09-05 14:53:50] [Rank 0] Group 2 Loss: 4.8452 +[2025-09-05 14:53:50] [Rank 0] Group 2 Loss: 4.8452 +[2025-09-05 14:53:50] [Rank 0] Group 3 Loss: 5.3389 +[2025-09-05 14:53:50] [Rank 0] Group 3 Loss: 5.3389 +[2025-09-05 14:53:50] [Rank 0] Group 4 Loss: 5.2180 +[2025-09-05 14:53:50] [Rank 0] Group 4 Loss: 5.2180 +[2025-09-05 14:53:50] [Rank 0] Group 5 Loss: 5.1952 +[2025-09-05 14:53:50] [Rank 0] Group 5 Loss: 5.1952 +[2025-09-05 14:53:50] [Rank 0] Group 6 Loss: 5.1560 +[2025-09-05 14:53:50] [Rank 0] Group 6 Loss: 5.1560 +[2025-09-05 14:53:50] [Rank 0] Group 7 Loss: 5.2379 +[2025-09-05 14:53:50] [Rank 0] Group 7 Loss: 5.2379 +[2025-09-05 14:53:50] [Rank 0] Group 8 Loss: 5.3470 +[2025-09-05 14:53:50] [Rank 0] Group 8 Loss: 5.3470 +[2025-09-05 14:53:50] [Rank 0] Group 9 Loss: 5.3285 +[2025-09-05 14:53:50] [Rank 0] Group 9 Loss: 5.3285 +[2025-09-05 14:53:50] [Rank 0] Group 10 Loss: 5.5515 +[2025-09-05 14:53:50] [Rank 0] Group 10 Loss: 5.5515 +[2025-09-05 14:53:50] [Rank 0] Group 11 Loss: 5.6323 +[2025-09-05 14:53:50] [Rank 0] Group 11 Loss: 5.6323 +[2025-09-05 14:53:50] [Rank 0] Group 12 Loss: 5.6270 +[2025-09-05 14:53:50] [Rank 0] Group 12 Loss: 5.6270 +[2025-09-05 14:53:50] [Rank 0] Group 13 Loss: 5.8241 +[2025-09-05 14:53:50] [Rank 0] Group 13 Loss: 5.8241 +[2025-09-05 14:53:50] [Rank 0] Group 14 Loss: 5.8714 +[2025-09-05 14:53:50] [Rank 0] Group 14 Loss: 5.8714 +[2025-09-05 14:53:50] [Rank 0] Group 15 Loss: 6.0551 +[2025-09-05 14:53:50] [Rank 0] Group 15 Loss: 6.0551 +[2025-09-05 14:53:50] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 
0] Group 1 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 14:53:50] [Rank 0] Group 9 FTA: 0.9700 +[2025-09-05 14:53:50] [Rank 0] Group 9 FTA: 0.9700 +[2025-09-05 14:53:50] [Rank 0] Group 10 FTA: 0.5900 +[2025-09-05 14:53:50] [Rank 0] Group 10 FTA: 0.5900 +[2025-09-05 14:53:50] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 14:53:50] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 14:53:50] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 14:53:50] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 14:53:50] [Rank 0] Group 13 FTA: 0.1600 +[2025-09-05 14:53:50] [Rank 0] Group 13 FTA: 0.1600 +[2025-09-05 14:53:50] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 14:53:50] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 14:53:50] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 14:53:50] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 14:53:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png +[2025-09-05 14:53:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png +[2025-09-05 14:53:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png +[2025-09-05 14:53:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png +[2025-09-05 14:53:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png +[2025-09-05 14:53:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png +[2025-09-05 14:53:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png +[2025-09-05 14:53:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png +[2025-09-05 14:53:52] [Rank 0] step:2001/10000 train_time:105255ms step_avg:52.60ms +[2025-09-05 14:53:52] [Rank 0] step:2001/10000 train_time:105255ms step_avg:52.60ms +[2025-09-05 14:53:52] [Rank 0] step:2021/10000 train_time:105902ms step_avg:52.40ms +[2025-09-05 14:53:52] [Rank 0] step:2021/10000 train_time:105902ms step_avg:52.40ms +[2025-09-05 14:53:53] [Rank 0] step:2041/10000 train_time:106555ms step_avg:52.21ms +[2025-09-05 14:53:53] [Rank 0] step:2041/10000 train_time:106555ms step_avg:52.21ms +[2025-09-05 14:53:54] [Rank 0] step:2061/10000 train_time:107208ms step_avg:52.02ms +[2025-09-05 14:53:54] [Rank 0] step:2061/10000 train_time:107208ms step_avg:52.02ms 
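Aside: the step_avg field in the entries above is simply the cumulative train_time divided by the current step index, which is why it drifts downward as the fixed startup cost is amortized over more steps; e.g. at step 2000, 105247ms / 2000 = 52.62ms, matching the log. A minimal Python sketch of that bookkeeping, with illustrative names rather than code quoted from the actual training script:

    # Illustrative only -- names are assumptions, not the training script's.
    train_time_ms = 105247              # cumulative wall-clock training time at this step
    step = 2000
    step_avg_ms = train_time_ms / step  # running average, not a per-step timing
    print(f"step:{step}/10000 train_time:{train_time_ms}ms step_avg:{step_avg_ms:.2f}ms")
    # -> step:2000/10000 train_time:105247ms step_avg:52.62ms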
+[2025-09-05 14:53:54] [Rank 0] step:2081/10000 train_time:107861ms step_avg:51.83ms
+[2025-09-05 14:53:55] [Rank 0] step:2101/10000 train_time:108515ms step_avg:51.65ms
+[2025-09-05 14:53:56] [Rank 0] step:2121/10000 train_time:109167ms step_avg:51.47ms
+[2025-09-05 14:53:56] [Rank 0] step:2141/10000 train_time:109821ms step_avg:51.29ms
+[2025-09-05 14:53:57] [Rank 0] step:2161/10000 train_time:110474ms step_avg:51.12ms
+[2025-09-05 14:53:58] [Rank 0] step:2181/10000 train_time:111127ms step_avg:50.95ms
+[2025-09-05 14:53:58] [Rank 0] step:2201/10000 train_time:111779ms step_avg:50.79ms
+[2025-09-05 14:53:59] [Rank 0] step:2221/10000 train_time:112433ms step_avg:50.62ms
+[2025-09-05 14:54:00] [Rank 0] step:2241/10000 train_time:113089ms step_avg:50.46ms
+[2025-09-05 14:54:00] [Rank 0] step:2261/10000 train_time:113748ms step_avg:50.31ms
+[2025-09-05 14:54:01] [Rank 0] step:2281/10000 train_time:114407ms step_avg:50.16ms
+[2025-09-05 14:54:02] [Rank 0] step:2301/10000 train_time:115070ms step_avg:50.01ms
+[2025-09-05 14:54:02] [Rank 0] step:2321/10000 train_time:115730ms step_avg:49.86ms
+[2025-09-05 14:54:03] [Rank 0] step:2341/10000 train_time:116389ms step_avg:49.72ms
+[2025-09-05 14:54:04] [Rank 0] step:2361/10000 train_time:117048ms step_avg:49.58ms
+[2025-09-05 14:54:04] [Rank 0] step:2381/10000 train_time:117707ms step_avg:49.44ms
+[2025-09-05 14:54:05] [Rank 0] step:2401/10000 train_time:118366ms step_avg:49.30ms
+[2025-09-05 14:54:06] [Rank 0] step:2421/10000 train_time:119026ms step_avg:49.16ms
+[2025-09-05 14:54:06] [Rank 0] step:2441/10000 train_time:119685ms step_avg:49.03ms
+[2025-09-05 14:54:07] [Rank 0] step:2461/10000 train_time:120344ms step_avg:48.90ms
+[2025-09-05 14:54:08] [Rank 0] step:2481/10000 train_time:121003ms step_avg:48.77ms
+[2025-09-05 14:54:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:54:09] [Rank 0] PRINT: step:2500/10000 train_loss:0.7880 val_loss:0.7588 train_time:121896ms step_avg:48.76ms
+[2025-09-05 14:54:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:54:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:55:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:55:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:55:30] [Rank 0] Total Loss: 5.3175
+[2025-09-05 14:55:30] [Rank 0] Total FTA (Unweighted): 0.7206
+[2025-09-05 14:55:30] [Rank 0] Total FTA (Weighted): 0.7206
+[2025-09-05 14:55:30] [Rank 0] Group 0 Loss: 5.2765
+[2025-09-05 14:55:30] [Rank 0] Group 1 Loss: 4.9423
+[2025-09-05 14:55:30] [Rank 0] Group 2 Loss: 4.8004
+[2025-09-05 14:55:30] [Rank 0] Group 3 Loss: 5.2092
+[2025-09-05 14:55:30] [Rank 0] Group 4 Loss: 5.1864
+[2025-09-05 14:55:30] [Rank 0] Group 5 Loss: 5.1671
+[2025-09-05 14:55:30] [Rank 0] Group 6 Loss: 5.1218
+[2025-09-05 14:55:30] [Rank 0] Group 7 Loss: 5.1996
+[2025-09-05 14:55:30] [Rank 0] Group 8 Loss: 5.2412
+[2025-09-05 14:55:30] [Rank 0] Group 9 Loss: 5.2549
+[2025-09-05 14:55:30] [Rank 0] Group 10 Loss: 5.4168
+[2025-09-05 14:55:30] [Rank 0] Group 11 Loss: 5.5017
+[2025-09-05 14:55:30] [Rank 0] Group 12 Loss: 5.5396
+[2025-09-05 14:55:30] [Rank 0] Group 13 Loss: 5.6615
+[2025-09-05 14:55:30] [Rank 0] Group 14 Loss: 5.6914
+[2025-09-05 14:55:30] [Rank 0] Group 15 Loss: 5.8701
+[2025-09-05 14:55:30] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 14:55:30] [Rank 0] Group 10 FTA: 0.8000
+[2025-09-05 14:55:30] [Rank 0] Group 11 FTA: 0.2700
+[2025-09-05 14:55:30] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 14:55:30] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 14:55:30] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 14:55:30] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 14:55:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 14:55:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 14:55:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 14:55:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 14:55:32] [Rank 0] step:2501/10000 train_time:121904ms step_avg:48.74ms
+[2025-09-05 14:55:32] [Rank 0] step:2521/10000 train_time:122344ms step_avg:48.53ms
+[2025-09-05 14:55:33] [Rank 0] step:2541/10000 train_time:123002ms step_avg:48.41ms
+[2025-09-05 14:55:34] [Rank 0] step:2561/10000 train_time:123661ms step_avg:48.29ms
+[2025-09-05 14:55:34] [Rank 0] step:2581/10000 train_time:124321ms step_avg:48.17ms
+[2025-09-05 14:55:35] [Rank 0] step:2601/10000 train_time:124982ms step_avg:48.05ms
+[2025-09-05 14:55:35] [Rank 0] step:2621/10000 train_time:125641ms step_avg:47.94ms
+[2025-09-05 14:55:36] [Rank 0] step:2641/10000 train_time:126300ms step_avg:47.82ms
+[2025-09-05 14:55:37] [Rank 0] step:2661/10000 train_time:126960ms step_avg:47.71ms
+[2025-09-05 14:55:37] [Rank 0] step:2681/10000 train_time:127620ms step_avg:47.60ms
+[2025-09-05 14:55:38] [Rank 0] step:2701/10000 train_time:128280ms step_avg:47.49ms
+[2025-09-05 14:55:39] [Rank 0] step:2721/10000 train_time:128940ms step_avg:47.39ms
+[2025-09-05 14:55:39] [Rank 0] step:2741/10000 train_time:129600ms step_avg:47.28ms
+[2025-09-05 14:55:40] [Rank 0] step:2761/10000 train_time:130260ms step_avg:47.18ms
+[2025-09-05 14:55:41] [Rank 0] step:2781/10000 train_time:130920ms step_avg:47.08ms
+[2025-09-05 14:55:41] [Rank 0] step:2801/10000 train_time:131581ms step_avg:46.98ms
+[2025-09-05 14:55:43] [Rank 0] step:2821/10000 train_time:132239ms step_avg:46.88ms
+[2025-09-05 14:55:43] [Rank 0] step:2841/10000 train_time:133356ms step_avg:46.94ms
+[2025-09-05 14:55:44] [Rank 0] step:2861/10000 train_time:134016ms step_avg:46.84ms
+[2025-09-05 14:55:45] [Rank 0] step:2881/10000 train_time:134675ms step_avg:46.75ms
+[2025-09-05 14:55:45] [Rank 0] step:2901/10000 train_time:135335ms step_avg:46.65ms
+[2025-09-05 14:55:46] [Rank 0] step:2921/10000 train_time:135995ms step_avg:46.56ms
+[2025-09-05 14:55:47] [Rank 0] step:2941/10000 train_time:136655ms step_avg:46.47ms
+[2025-09-05 14:55:47] [Rank 0] step:2961/10000 train_time:137314ms step_avg:46.37ms
+[2025-09-05 14:55:48] [Rank 0] step:2981/10000 train_time:137974ms step_avg:46.28ms
+[2025-09-05 14:55:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:55:49] [Rank 0] PRINT: step:3000/10000 train_loss:0.7580 val_loss:0.7375 train_time:138868ms step_avg:46.29ms
+[2025-09-05 14:55:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:55:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:57:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:57:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:57:10] [Rank 0] Total Loss: 5.2999
+[2025-09-05 14:57:10] [Rank 0] Total FTA (Unweighted): 0.7537
+[2025-09-05 14:57:10] [Rank 0] Total FTA (Weighted): 0.7538
+[2025-09-05 14:57:10] [Rank 0] Group 0 Loss: 5.2420
+[2025-09-05 14:57:10] [Rank 0] Group 1 Loss: 5.0745
+[2025-09-05 14:57:10] [Rank 0] Group 2 Loss: 4.8262
+[2025-09-05 14:57:10] [Rank 0] Group 3 Loss: 5.3109
+[2025-09-05 14:57:10] [Rank 0] Group 4 Loss: 5.1776
+[2025-09-05 14:57:10] [Rank 0] Group 5 Loss: 5.1573
+[2025-09-05 14:57:10] [Rank 0] Group 6 Loss: 5.1309
+[2025-09-05 14:57:10] [Rank 0] Group 7 Loss: 5.1545
+[2025-09-05 14:57:10] [Rank 0] Group 8 Loss: 5.2515
+[2025-09-05 14:57:10] [Rank 0] Group 9 Loss: 5.2716
+[2025-09-05 14:57:10] [Rank 0] Group 10 Loss: 5.3617
+[2025-09-05 14:57:10] [Rank 0] Group 11 Loss: 5.4342
+[2025-09-05 14:57:10] [Rank 0] Group 12 Loss: 5.4938
+[2025-09-05 14:57:10] [Rank 0] Group 13 Loss: 5.6059
+[2025-09-05 14:57:10] [Rank 0] Group 14 Loss: 5.5777
+[2025-09-05 14:57:10] [Rank 0] Group 15 Loss: 5.7273
+[2025-09-05 14:57:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 14:57:10] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 14:57:10] [Rank 0] Group 11 FTA: 0.5900
+[2025-09-05 14:57:10] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 14:57:10] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 14:57:10] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 14:57:10] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 14:57:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 14:57:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 14:57:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 14:57:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 14:57:12] [Rank 0] step:3001/10000 train_time:138876ms step_avg:46.28ms
+[2025-09-05 14:57:12] [Rank 0] step:3021/10000 train_time:139318ms step_avg:46.12ms
+[2025-09-05 14:57:13] [Rank 0] step:3041/10000 train_time:139976ms step_avg:46.03ms
+[2025-09-05 14:57:14] [Rank 0] step:3061/10000 train_time:140635ms step_avg:45.94ms
+[2025-09-05 14:57:14] [Rank 0] step:3081/10000 train_time:141294ms step_avg:45.86ms
+[2025-09-05 14:57:15] [Rank 0] step:3101/10000 train_time:141952ms step_avg:45.78ms
+[2025-09-05 14:57:16] [Rank 0] step:3121/10000 train_time:142611ms step_avg:45.69ms
+[2025-09-05 14:57:16] [Rank 0] step:3141/10000 train_time:143270ms step_avg:45.61ms
+[2025-09-05 14:57:17] [Rank 0] step:3161/10000 train_time:143928ms step_avg:45.53ms
+[2025-09-05 14:57:18] [Rank 0] step:3181/10000 train_time:144593ms step_avg:45.46ms
+[2025-09-05 14:57:18] [Rank 0] step:3201/10000 train_time:145250ms step_avg:45.38ms
+[2025-09-05 14:57:19] [Rank 0] step:3221/10000 train_time:145909ms step_avg:45.30ms
+[2025-09-05 14:57:20] [Rank 0] step:3241/10000 train_time:146568ms step_avg:45.22ms
+[2025-09-05 14:57:20] [Rank 0] step:3261/10000 train_time:147226ms step_avg:45.15ms
+[2025-09-05 14:57:21] [Rank 0] step:3281/10000 train_time:147885ms step_avg:45.07ms
+[2025-09-05 14:57:22] [Rank 0] step:3301/10000 train_time:148543ms step_avg:45.00ms
+[2025-09-05 14:57:22] [Rank 0] step:3321/10000 train_time:149202ms step_avg:44.93ms
+[2025-09-05 14:57:23] [Rank 0] step:3341/10000 train_time:149861ms step_avg:44.86ms
+[2025-09-05 14:57:23] [Rank 0] step:3361/10000 train_time:150519ms step_avg:44.78ms
+[2025-09-05 14:57:24] [Rank 0] step:3381/10000 train_time:151176ms step_avg:44.71ms
+[2025-09-05 14:57:25] [Rank 0] step:3401/10000 train_time:151835ms step_avg:44.64ms
+[2025-09-05 14:57:25] [Rank 0] step:3421/10000 train_time:152493ms step_avg:44.58ms
+[2025-09-05 14:57:26] [Rank 0] step:3441/10000 train_time:153152ms step_avg:44.51ms
+[2025-09-05 14:57:27] [Rank 0] step:3461/10000 train_time:153810ms step_avg:44.44ms
+[2025-09-05 14:57:27] [Rank 0] step:3481/10000 train_time:154468ms step_avg:44.37ms
+[2025-09-05 14:57:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:57:28] [Rank 0] PRINT: step:3500/10000 train_loss:0.7364 val_loss:0.7195 train_time:155360ms step_avg:44.39ms
+[2025-09-05 14:57:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:57:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:58:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:58:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:58:50] [Rank 0] Total Loss: 5.2697
+[2025-09-05 14:58:50] [Rank 0] Total FTA (Unweighted): 0.7687
+[2025-09-05 14:58:50] [Rank 0] Total FTA (Weighted): 0.7688
+[2025-09-05 14:58:50] [Rank 0] Group 0 Loss: 5.1187
+[2025-09-05 14:58:50] [Rank 0] Group 1 Loss: 5.0537
+[2025-09-05 14:58:50] [Rank 0] Group 2 Loss: 4.7605
+[2025-09-05 14:58:50] [Rank 0] Group 3 Loss: 5.2285
+[2025-09-05 14:58:50] [Rank 0] Group 4 Loss: 5.1600
+[2025-09-05 14:58:50] [Rank 0] Group 5 Loss: 5.1190
+[2025-09-05 14:58:50] [Rank 0] Group 6 Loss: 5.1287
+[2025-09-05 14:58:50] [Rank 0] Group 7 Loss: 5.1698
+[2025-09-05 14:58:50] [Rank 0] Group 8 Loss: 5.2274
+[2025-09-05 14:58:50] [Rank 0] Group 9 Loss: 5.2519
+[2025-09-05 14:58:50] [Rank 0] Group 10 Loss: 5.3618
+[2025-09-05 14:58:50] [Rank 0] Group 11 Loss: 5.3434
+[2025-09-05 14:58:50] [Rank 0] Group 12 Loss: 5.4873
+[2025-09-05 14:58:50] [Rank 0] Group 13 Loss: 5.6081
+[2025-09-05 14:58:50] [Rank 0] Group 14 Loss: 5.5878
+[2025-09-05 14:58:50] [Rank 0] Group 15 Loss: 5.7091
+[2025-09-05 14:58:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 14:58:50] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 14:58:50] [Rank 0] Group 11 FTA: 0.6900
+[2025-09-05 14:58:50] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 14:58:50] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 14:58:50] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 14:58:50] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 14:58:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 14:58:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 14:58:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 14:58:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 14:58:51] [Rank 0] step:3501/10000 train_time:155368ms step_avg:44.38ms
+[2025-09-05 14:58:52] [Rank 0] step:3521/10000 train_time:155801ms step_avg:44.25ms
+[2025-09-05 14:58:52] [Rank 0] step:3541/10000 train_time:156461ms step_avg:44.19ms
+[2025-09-05 14:58:53] [Rank 0] step:3561/10000 train_time:157120ms step_avg:44.12ms
+[2025-09-05 14:58:54] [Rank 0] step:3581/10000 train_time:157779ms step_avg:44.06ms
+[2025-09-05 14:58:54] [Rank 0] step:3601/10000 train_time:158438ms step_avg:44.00ms
+[2025-09-05 14:58:55] [Rank 0] step:3621/10000 train_time:159097ms step_avg:43.94ms
+[2025-09-05 14:58:56] [Rank 0] step:3641/10000 train_time:159829ms step_avg:43.90ms
+[2025-09-05 14:58:56] [Rank 0] step:3661/10000 train_time:160488ms step_avg:43.84ms
+[2025-09-05 14:58:57] [Rank 0] step:3681/10000 train_time:161151ms step_avg:43.78ms
+[2025-09-05 14:58:58] [Rank 0] step:3701/10000 train_time:161812ms step_avg:43.72ms
+[2025-09-05 14:58:58] [Rank 0] step:3721/10000 train_time:162470ms step_avg:43.66ms
+[2025-09-05 14:58:59] [Rank 0] step:3741/10000 train_time:163130ms step_avg:43.61ms
+[2025-09-05 14:59:00] [Rank 0] step:3761/10000 train_time:163792ms step_avg:43.55ms
+[2025-09-05 14:59:00] [Rank 0] step:3781/10000 train_time:164452ms step_avg:43.49ms
+[2025-09-05 14:59:01] [Rank 0] step:3801/10000 train_time:165113ms step_avg:43.44ms
+[2025-09-05 14:59:02] [Rank 0] step:3821/10000 train_time:165773ms step_avg:43.38ms
+[2025-09-05 14:59:02] [Rank 0] step:3841/10000 train_time:166434ms step_avg:43.33ms
+[2025-09-05 14:59:03] [Rank 0] step:3861/10000 train_time:167093ms step_avg:43.28ms
+[2025-09-05 14:59:04] [Rank 0] step:3881/10000 train_time:167753ms step_avg:43.22ms
+[2025-09-05 14:59:04] [Rank 0] step:3901/10000 train_time:168414ms step_avg:43.17ms
+[2025-09-05 14:59:05] [Rank 0] step:3921/10000 train_time:169074ms step_avg:43.12ms
+[2025-09-05 14:59:06] [Rank 0] step:3941/10000 train_time:169734ms step_avg:43.07ms
+[2025-09-05 14:59:06] [Rank 0] step:3961/10000 train_time:170393ms step_avg:43.02ms
+[2025-09-05 14:59:07] [Rank 0] step:3981/10000 train_time:171053ms step_avg:42.97ms
+[2025-09-05 14:59:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:59:08] [Rank 0] PRINT: step:4000/10000 train_loss:0.7210 val_loss:0.7073 train_time:171947ms step_avg:42.99ms
+[2025-09-05 14:59:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:59:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:00:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:00:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:00:29] [Rank 0] Total Loss: 5.3022
+[2025-09-05 15:00:29] [Rank 0] Total FTA (Unweighted): 0.7900
+[2025-09-05 15:00:29] [Rank 0] Total FTA (Weighted): 0.7900
+[2025-09-05 15:00:29] [Rank 0] Group 0 Loss: 5.2263
+[2025-09-05 15:00:29] [Rank 0] Group 1 Loss: 5.0233
+[2025-09-05 15:00:29] [Rank 0] Group 2 Loss: 4.8313
+[2025-09-05 15:00:29] [Rank 0] Group 3 Loss: 5.2516
+[2025-09-05 15:00:29] [Rank 0] Group 4 Loss: 5.1517
+[2025-09-05 15:00:29] [Rank 0] Group 5 Loss: 5.1405
+[2025-09-05 15:00:29] [Rank 0] Group 6 Loss: 5.1651
+[2025-09-05 15:00:29] [Rank 0] Group 7 Loss: 5.2238
+[2025-09-05 15:00:29] [Rank 0] Group 8 Loss: 5.2469
+[2025-09-05 15:00:29] [Rank 0] Group 9 Loss: 5.2560
+[2025-09-05 15:00:29] [Rank 0] Group 10 Loss: 5.4011
+[2025-09-05 15:00:29] [Rank 0] Group 11 Loss: 5.3553
+[2025-09-05 15:00:29] [Rank 0] Group 12 Loss: 5.5168
+[2025-09-05 15:00:29] [Rank 0] Group 13 Loss: 5.6744
+[2025-09-05 15:00:29] [Rank 0] Group 14 Loss: 5.6114
+[2025-09-05 15:00:29] [Rank 0] Group 15 Loss: 5.7591
+[2025-09-05 15:00:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:00:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:00:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:00:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:00:29] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:00:29] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 15:00:29] [Rank 0] Group 11 FTA: 0.8900 +[2025-09-05 15:00:29] [Rank 0] Group 11 FTA: 0.8900 +[2025-09-05 15:00:29] [Rank 0] Group 12 FTA: 0.3200 +[2025-09-05 15:00:29] [Rank 0] Group 12 FTA: 0.3200 +[2025-09-05 15:00:29] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 15:00:29] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 15:00:29] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 15:00:29] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 15:00:29] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:00:29] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:00:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png +[2025-09-05 15:00:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png +[2025-09-05 15:00:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png +[2025-09-05 15:00:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png +[2025-09-05 15:00:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png +[2025-09-05 15:00:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png +[2025-09-05 15:00:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png +[2025-09-05 15:00:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png +[2025-09-05 15:00:31] [Rank 0] step:4001/10000 train_time:171954ms step_avg:42.98ms +[2025-09-05 15:00:31] [Rank 0] step:4001/10000 train_time:171954ms step_avg:42.98ms +[2025-09-05 15:00:31] [Rank 0] step:4021/10000 train_time:172405ms step_avg:42.88ms +[2025-09-05 15:00:31] [Rank 0] step:4021/10000 train_time:172405ms step_avg:42.88ms +[2025-09-05 15:00:32] [Rank 0] step:4041/10000 train_time:173063ms step_avg:42.83ms +[2025-09-05 15:00:32] [Rank 0] step:4041/10000 train_time:173063ms step_avg:42.83ms +[2025-09-05 15:00:33] [Rank 0] step:4061/10000 train_time:173721ms step_avg:42.78ms +[2025-09-05 15:00:33] [Rank 0] step:4061/10000 train_time:173721ms step_avg:42.78ms +[2025-09-05 15:00:33] [Rank 0] step:4081/10000 train_time:174379ms step_avg:42.73ms +[2025-09-05 15:00:33] [Rank 0] step:4081/10000 train_time:174379ms step_avg:42.73ms +[2025-09-05 15:00:34] [Rank 0] step:4101/10000 train_time:175037ms step_avg:42.68ms +[2025-09-05 15:00:34] 
[Rank 0] step:4101/10000 train_time:175037ms step_avg:42.68ms +[2025-09-05 15:00:35] [Rank 0] step:4121/10000 train_time:175695ms step_avg:42.63ms +[2025-09-05 15:00:35] [Rank 0] step:4121/10000 train_time:175695ms step_avg:42.63ms +[2025-09-05 15:00:35] [Rank 0] step:4141/10000 train_time:176354ms step_avg:42.59ms +[2025-09-05 15:00:35] [Rank 0] step:4141/10000 train_time:176354ms step_avg:42.59ms +[2025-09-05 15:00:36] [Rank 0] step:4161/10000 train_time:177011ms step_avg:42.54ms +[2025-09-05 15:00:36] [Rank 0] step:4161/10000 train_time:177011ms step_avg:42.54ms +[2025-09-05 15:00:37] [Rank 0] step:4181/10000 train_time:177670ms step_avg:42.49ms +[2025-09-05 15:00:37] [Rank 0] step:4181/10000 train_time:177670ms step_avg:42.49ms +[2025-09-05 15:00:37] [Rank 0] step:4201/10000 train_time:178328ms step_avg:42.45ms +[2025-09-05 15:00:37] [Rank 0] step:4201/10000 train_time:178328ms step_avg:42.45ms +[2025-09-05 15:00:38] [Rank 0] step:4221/10000 train_time:178986ms step_avg:42.40ms +[2025-09-05 15:00:38] [Rank 0] step:4221/10000 train_time:178986ms step_avg:42.40ms +[2025-09-05 15:00:39] [Rank 0] step:4241/10000 train_time:179838ms step_avg:42.40ms +[2025-09-05 15:00:39] [Rank 0] step:4241/10000 train_time:179838ms step_avg:42.40ms +[2025-09-05 15:00:39] [Rank 0] step:4261/10000 train_time:180495ms step_avg:42.36ms +[2025-09-05 15:00:39] [Rank 0] step:4261/10000 train_time:180495ms step_avg:42.36ms +[2025-09-05 15:00:40] [Rank 0] step:4281/10000 train_time:181153ms step_avg:42.32ms +[2025-09-05 15:00:40] [Rank 0] step:4281/10000 train_time:181153ms step_avg:42.32ms +[2025-09-05 15:00:41] [Rank 0] step:4301/10000 train_time:181813ms step_avg:42.27ms +[2025-09-05 15:00:41] [Rank 0] step:4301/10000 train_time:181813ms step_avg:42.27ms +[2025-09-05 15:00:42] [Rank 0] step:4321/10000 train_time:182618ms step_avg:42.26ms +[2025-09-05 15:00:42] [Rank 0] step:4321/10000 train_time:182618ms step_avg:42.26ms +[2025-09-05 15:00:42] [Rank 0] step:4341/10000 train_time:183276ms step_avg:42.22ms +[2025-09-05 15:00:42] [Rank 0] step:4341/10000 train_time:183276ms step_avg:42.22ms +[2025-09-05 15:00:43] [Rank 0] step:4361/10000 train_time:183934ms step_avg:42.18ms +[2025-09-05 15:00:43] [Rank 0] step:4361/10000 train_time:183934ms step_avg:42.18ms +[2025-09-05 15:00:44] [Rank 0] step:4381/10000 train_time:184591ms step_avg:42.13ms +[2025-09-05 15:00:44] [Rank 0] step:4381/10000 train_time:184591ms step_avg:42.13ms +[2025-09-05 15:00:44] [Rank 0] step:4401/10000 train_time:185249ms step_avg:42.09ms +[2025-09-05 15:00:44] [Rank 0] step:4401/10000 train_time:185249ms step_avg:42.09ms +[2025-09-05 15:00:45] [Rank 0] step:4421/10000 train_time:185907ms step_avg:42.05ms +[2025-09-05 15:00:45] [Rank 0] step:4421/10000 train_time:185907ms step_avg:42.05ms +[2025-09-05 15:00:46] [Rank 0] step:4441/10000 train_time:186565ms step_avg:42.01ms +[2025-09-05 15:00:46] [Rank 0] step:4441/10000 train_time:186565ms step_avg:42.01ms +[2025-09-05 15:00:46] [Rank 0] step:4461/10000 train_time:187223ms step_avg:41.97ms +[2025-09-05 15:00:46] [Rank 0] step:4461/10000 train_time:187223ms step_avg:41.97ms +[2025-09-05 15:00:47] [Rank 0] step:4481/10000 train_time:187881ms step_avg:41.93ms +[2025-09-05 15:00:47] [Rank 0] step:4481/10000 train_time:187881ms step_avg:41.93ms +[2025-09-05 15:00:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:00:48] [Rank 0] PRINT: step:4500/10000 train_loss:0.7097 val_loss:0.6974 train_time:188772ms step_avg:41.95ms
+[2025-09-05 15:00:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:00:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:02:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:02:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:02:09] [Rank 0] Total Loss: 5.3093
+[2025-09-05 15:02:09] [Rank 0] Total FTA (Unweighted): 0.7944
+[2025-09-05 15:02:09] [Rank 0] Total FTA (Weighted): 0.7944
+[2025-09-05 15:02:09] [Rank 0] Group 0 Loss: 5.3560
+[2025-09-05 15:02:09] [Rank 0] Group 1 Loss: 5.0154
+[2025-09-05 15:02:09] [Rank 0] Group 2 Loss: 4.8926
+[2025-09-05 15:02:09] [Rank 0] Group 3 Loss: 5.2955
+[2025-09-05 15:02:09] [Rank 0] Group 4 Loss: 5.2046
+[2025-09-05 15:02:09] [Rank 0] Group 5 Loss: 5.1044
+[2025-09-05 15:02:09] [Rank 0] Group 6 Loss: 5.1371
+[2025-09-05 15:02:09] [Rank 0] Group 7 Loss: 5.2209
+[2025-09-05 15:02:09] [Rank 0] Group 8 Loss: 5.2592
+[2025-09-05 15:02:09] [Rank 0] Group 9 Loss: 5.2892
+[2025-09-05 15:02:09] [Rank 0] Group 10 Loss: 5.3749
+[2025-09-05 15:02:09] [Rank 0] Group 11 Loss: 5.3614
+[2025-09-05 15:02:09] [Rank 0] Group 12 Loss: 5.5176
+[2025-09-05 15:02:09] [Rank 0] Group 13 Loss: 5.5985
+[2025-09-05 15:02:09] [Rank 0] Group 14 Loss: 5.5977
+[2025-09-05 15:02:09] [Rank 0] Group 15 Loss: 5.7235
+[2025-09-05 15:02:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:02:09] [Rank 0] Group 9 FTA: 0.9900
+[2025-09-05 15:02:09] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 15:02:09] [Rank 0] Group 11 FTA: 0.9400
+[2025-09-05 15:02:09] [Rank 0] Group 12 FTA: 0.4200
+[2025-09-05 15:02:09] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 15:02:09] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 15:02:09] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:02:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:02:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:02:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:02:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:02:11] [Rank 0] step:4501/10000 train_time:188780ms step_avg:41.94ms
+[2025-09-05 15:02:11] [Rank 0] step:4521/10000 train_time:189229ms step_avg:41.86ms
+[2025-09-05 15:02:12] [Rank 0] step:4541/10000 train_time:189888ms step_avg:41.82ms
+[2025-09-05 15:02:13] [Rank 0] step:4561/10000 train_time:190547ms step_avg:41.78ms
+[2025-09-05 15:02:13] [Rank 0] step:4581/10000 train_time:191208ms step_avg:41.74ms
+[2025-09-05 15:02:14] [Rank 0] step:4601/10000 train_time:191868ms step_avg:41.70ms
+[2025-09-05 15:02:15] [Rank 0] step:4621/10000 train_time:192527ms step_avg:41.66ms
+[2025-09-05 15:02:15] [Rank 0] step:4641/10000 train_time:193186ms step_avg:41.63ms
+[2025-09-05 15:02:16] [Rank 0] step:4661/10000 train_time:193846ms step_avg:41.59ms
+[2025-09-05 15:02:17] [Rank 0] step:4681/10000 train_time:194505ms step_avg:41.55ms
+[2025-09-05 15:02:17] [Rank 0] step:4701/10000 train_time:195165ms step_avg:41.52ms
+[2025-09-05 15:02:18] [Rank 0] step:4721/10000 train_time:195830ms step_avg:41.48ms
+[2025-09-05 15:02:19] [Rank 0] step:4741/10000 train_time:196489ms step_avg:41.44ms
+[2025-09-05 15:02:19] [Rank 0] step:4761/10000 train_time:197150ms step_avg:41.41ms
+[2025-09-05 15:02:20] [Rank 0] step:4781/10000 train_time:197810ms step_avg:41.37ms
+[2025-09-05 15:02:21] [Rank 0] step:4801/10000 train_time:198469ms step_avg:41.34ms
+[2025-09-05 15:02:21] [Rank 0] step:4821/10000 train_time:199130ms step_avg:41.30ms
+[2025-09-05 15:02:22] [Rank 0] step:4841/10000 train_time:200096ms step_avg:41.33ms
+[2025-09-05 15:02:23] [Rank 0] step:4861/10000 train_time:200757ms step_avg:41.30ms
+[2025-09-05 15:02:24] [Rank 0] step:4881/10000 train_time:201416ms step_avg:41.27ms
+[2025-09-05 15:02:24] [Rank 0] step:4901/10000 train_time:202076ms step_avg:41.23ms
+[2025-09-05 15:02:25] [Rank 0] step:4921/10000 train_time:202736ms step_avg:41.20ms
+[2025-09-05 15:02:26] [Rank 0] step:4941/10000 train_time:203395ms step_avg:41.16ms
+[2025-09-05 15:02:26] [Rank 0] step:4961/10000 train_time:204055ms step_avg:41.13ms
+[2025-09-05 15:02:27] [Rank 0] step:4981/10000 train_time:204716ms step_avg:41.10ms
+[2025-09-05 15:02:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:02:28] [Rank 0] PRINT: step:5000/10000 train_loss:0.7017 val_loss:0.6888 train_time:205611ms step_avg:41.12ms
+[2025-09-05 15:02:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:02:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:03:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:03:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:03:49] [Rank 0] Total Loss: 5.2560
+[2025-09-05 15:03:49] [Rank 0] Total FTA (Unweighted): 0.8163
+[2025-09-05 15:03:49] [Rank 0] Total FTA (Weighted): 0.8163
+[2025-09-05 15:03:49] [Rank 0] Group 0 Loss: 5.2076
+[2025-09-05 15:03:49] [Rank 0] Group 1 Loss: 4.9576
+[2025-09-05 15:03:49] [Rank 0] Group 2 Loss: 4.8118
+[2025-09-05 15:03:49] [Rank 0] Group 3 Loss: 5.2123
+[2025-09-05 15:03:49] [Rank 0] Group 4 Loss: 5.1641
+[2025-09-05 15:03:49] [Rank 0] Group 5 Loss: 5.1494
+[2025-09-05 15:03:49] [Rank 0] Group 6 Loss: 5.1647
+[2025-09-05 15:03:49] [Rank 0] Group 7 Loss: 5.2097
+[2025-09-05 15:03:49] [Rank 0] Group 8 Loss: 5.2575
+[2025-09-05 15:03:49] [Rank 0] Group 9 Loss: 5.2417
+[2025-09-05 15:03:49] [Rank 0] Group 10 Loss: 5.3360
+[2025-09-05 15:03:49] [Rank 0] Group 11 Loss: 5.2946
+[2025-09-05 15:03:49] [Rank 0] Group 12 Loss: 5.4266
+[2025-09-05 15:03:49] [Rank 0] Group 13 Loss: 5.5523
+[2025-09-05 15:03:49] [Rank 0] Group 14 Loss: 5.5097
+[2025-09-05 15:03:49] [Rank 0] Group 15 Loss: 5.5996
+[2025-09-05 15:03:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 15:03:49] [Rank 0] Group 11 FTA: 0.9500
+[2025-09-05 15:03:49] [Rank 0] Group 12 FTA: 0.6800
+[2025-09-05 15:03:50] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 15:03:50] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 15:03:50] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 15:03:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:03:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:03:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:03:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:03:51] [Rank 0] step:5001/10000 train_time:205618ms step_avg:41.12ms
+[2025-09-05 15:03:52] [Rank 0] step:5021/10000 train_time:206074ms step_avg:41.04ms
+[2025-09-05 15:03:52] [Rank 0] step:5041/10000 train_time:206733ms step_avg:41.01ms
+[2025-09-05 15:03:53] [Rank 0] step:5061/10000 train_time:207393ms step_avg:40.98ms
+[2025-09-05 15:03:54] [Rank 0] step:5081/10000 train_time:208050ms step_avg:40.95ms
+[2025-09-05 15:03:54] [Rank 0] step:5101/10000 train_time:208709ms step_avg:40.92ms
+[2025-09-05 15:03:55] [Rank 0] step:5121/10000 train_time:209368ms step_avg:40.88ms
+[2025-09-05 15:03:55] [Rank 0] step:5141/10000 train_time:210026ms step_avg:40.85ms
+[2025-09-05 15:03:56] [Rank 0] step:5161/10000 train_time:210685ms step_avg:40.82ms
+[2025-09-05 15:03:57] [Rank 0] step:5181/10000 train_time:211344ms step_avg:40.79ms
+[2025-09-05 15:03:57] [Rank 0] step:5201/10000 train_time:212004ms step_avg:40.76ms
+[2025-09-05 15:03:58] [Rank 0] step:5221/10000 train_time:212661ms step_avg:40.73ms
+[2025-09-05 15:03:59] [Rank 0] step:5241/10000 train_time:213320ms step_avg:40.70ms
+[2025-09-05 15:03:59] [Rank 0] step:5261/10000 train_time:213979ms step_avg:40.67ms
+[2025-09-05 15:04:00] [Rank 0] step:5281/10000 train_time:214637ms step_avg:40.64ms
+[2025-09-05 15:04:01] [Rank 0] step:5301/10000 train_time:215296ms step_avg:40.61ms
+[2025-09-05 15:04:01] [Rank 0] step:5321/10000 train_time:215955ms step_avg:40.59ms
+[2025-09-05 15:04:02] [Rank 0] step:5341/10000 train_time:216614ms step_avg:40.56ms
+[2025-09-05 15:04:03] [Rank 0] step:5361/10000 train_time:217274ms step_avg:40.53ms
+[2025-09-05 15:04:03] [Rank 0] step:5381/10000 train_time:217933ms step_avg:40.50ms
+[2025-09-05 15:04:04] [Rank 0] step:5401/10000 train_time:218594ms step_avg:40.47ms
+[2025-09-05 15:04:05] [Rank 0] step:5421/10000 train_time:219252ms step_avg:40.44ms
+[2025-09-05 15:04:05] [Rank 0] step:5441/10000 train_time:219909ms step_avg:40.42ms
+[2025-09-05 15:04:06] [Rank 0] step:5461/10000 train_time:220567ms step_avg:40.39ms
+[2025-09-05 15:04:07] [Rank 0] step:5481/10000 train_time:221225ms step_avg:40.36ms
+[2025-09-05 15:04:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:04:08] [Rank 0] PRINT: step:5500/10000 train_loss:0.6918 val_loss:0.6813 train_time:222117ms step_avg:40.38ms
+[2025-09-05 15:04:08] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:04:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:05:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:05:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:05:29] [Rank 0] Total Loss: 5.1778
+[2025-09-05 15:05:29] [Rank 0] Total FTA (Unweighted): 0.8319
+[2025-09-05 15:05:29] [Rank 0] Total FTA (Weighted): 0.8319
+[2025-09-05 15:05:29] [Rank 0] Group 0 Loss: 5.1934
+[2025-09-05 15:05:29] [Rank 0] Group 1 Loss: 4.8555
+[2025-09-05 15:05:29] [Rank 0] Group 2 Loss: 4.7351
+[2025-09-05 15:05:29] [Rank 0] Group 3 Loss: 5.2048
+[2025-09-05 15:05:29] [Rank 0] Group 4 Loss: 5.1157
+[2025-09-05 15:05:29] [Rank 0] Group 5 Loss: 5.0471
+[2025-09-05 15:05:29] [Rank 0] Group 6 Loss: 5.0477
+[2025-09-05 15:05:29] [Rank 0] Group 7 Loss: 5.0903
+[2025-09-05 15:05:29] [Rank 0] Group 8 Loss: 5.1953
+[2025-09-05 15:05:29] [Rank 0] Group 9 Loss: 5.1758
+[2025-09-05 15:05:29] [Rank 0] Group 10 Loss: 5.2759
+[2025-09-05 15:05:29] [Rank 0] Group 11 Loss: 5.2157
+[2025-09-05 15:05:29] [Rank 0] Group 12 Loss: 5.2760
+[2025-09-05 15:05:29] [Rank 0] Group 13 Loss: 5.4641
+[2025-09-05 15:05:29] [Rank 0] Group 14 Loss: 5.4211
+[2025-09-05 15:05:29] [Rank 0] Group 15 Loss: 5.5309
+[2025-09-05 15:05:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:05:29] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 15:05:29] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-05 15:05:29] [Rank 0] Group 12 FTA: 0.7600
+[2025-09-05 15:05:29] [Rank 0] Group 13 FTA: 0.2800
+[2025-09-05 15:05:29] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-05 15:05:29] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 15:05:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:05:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:05:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:05:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:05:31] [Rank 0] step:5501/10000 train_time:222125ms step_avg:40.38ms
+[2025-09-05 15:05:31] [Rank 0] step:5521/10000 train_time:222579ms step_avg:40.31ms
+[2025-09-05 15:05:32] [Rank 0] step:5541/10000 train_time:223238ms step_avg:40.29ms
+[2025-09-05 15:05:33] [Rank 0] step:5561/10000 train_time:223896ms step_avg:40.26ms
+[2025-09-05 15:05:33] [Rank 0] step:5581/10000 train_time:224555ms step_avg:40.24ms
+[2025-09-05 15:05:34] [Rank 0] step:5601/10000 train_time:225215ms step_avg:40.21ms
+[2025-09-05 15:05:35] [Rank 0] step:5621/10000 train_time:225874ms step_avg:40.18ms
+[2025-09-05 15:05:36] [Rank 0] step:5641/10000 train_time:226532ms step_avg:40.16ms
+[2025-09-05 15:05:36] [Rank 0] step:5661/10000 train_time:227647ms step_avg:40.21ms
+[2025-09-05 15:05:37] [Rank 0] step:5681/10000 train_time:228306ms step_avg:40.19ms
+[2025-09-05 15:05:38] [Rank 0] step:5701/10000 train_time:228964ms step_avg:40.16ms
+[2025-09-05 15:05:38] [Rank 0] step:5721/10000 train_time:229623ms step_avg:40.14ms
+[2025-09-05 15:05:39] [Rank 0] step:5741/10000 train_time:230283ms step_avg:40.11ms
+[2025-09-05 15:05:40] [Rank 0] step:5761/10000 train_time:230942ms step_avg:40.09ms
+[2025-09-05 15:05:40] [Rank 0] step:5781/10000 train_time:231601ms step_avg:40.06ms
+[2025-09-05 15:05:41] [Rank 0] step:5801/10000 train_time:232261ms step_avg:40.04ms
+[2025-09-05 15:05:42] [Rank 0] step:5821/10000 train_time:232921ms step_avg:40.01ms
+[2025-09-05 15:05:42] [Rank 0] step:5841/10000 train_time:233580ms step_avg:39.99ms
+[2025-09-05 15:05:43] [Rank 0] step:5861/10000 train_time:234239ms step_avg:39.97ms
+[2025-09-05 15:05:44] [Rank 0] step:5881/10000 train_time:234898ms step_avg:39.94ms
+[2025-09-05 15:05:44] [Rank 0] step:5901/10000 train_time:235558ms step_avg:39.92ms
+[2025-09-05 15:05:45] [Rank 0] step:5921/10000 train_time:236217ms step_avg:39.89ms
+[2025-09-05 15:05:46] [Rank 0] step:5941/10000 train_time:236877ms step_avg:39.87ms
+[2025-09-05 15:05:46] [Rank 0] step:5961/10000 train_time:237536ms step_avg:39.85ms
+[2025-09-05 15:05:47] [Rank 0] step:5981/10000 train_time:238194ms step_avg:39.83ms
+[2025-09-05 15:05:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:05:48] [Rank 0] PRINT: step:6000/10000 train_loss:0.6849 val_loss:0.6742 train_time:239087ms step_avg:39.85ms
+[2025-09-05 15:05:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:05:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:07:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:07:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:07:09] [Rank 0] Total Loss: 5.3138
+[2025-09-05 15:07:09] [Rank 0] Total FTA (Unweighted): 0.8419
+[2025-09-05 15:07:09] [Rank 0] Total FTA (Weighted): 0.8419
+[2025-09-05 15:07:09] [Rank 0] Group 0 Loss: 5.3218
+[2025-09-05 15:07:09] [Rank 0] Group 1 Loss: 5.0497
+[2025-09-05 15:07:09] [Rank 0] Group 2 Loss: 4.8699
+[2025-09-05 15:07:09] [Rank 0] Group 3 Loss: 5.2706
+[2025-09-05 15:07:09] [Rank 0] Group 4 Loss: 5.2353
+[2025-09-05 15:07:09] [Rank 0] Group 5 Loss: 5.1651
+[2025-09-05 15:07:09] [Rank 0] Group 6 Loss: 5.2009
+[2025-09-05 15:07:09] [Rank 0] Group 7 Loss: 5.2621
+[2025-09-05 15:07:09] [Rank 0] Group 8 Loss: 5.3298
+[2025-09-05 15:07:09] [Rank 0] Group 9 Loss: 5.3322
+[2025-09-05 15:07:09] [Rank 0] Group 10 Loss: 5.4392
+[2025-09-05 15:07:09] [Rank 0] Group 11 Loss: 5.3786
+[2025-09-05 15:07:09] [Rank 0] Group 12 Loss: 5.4411
+[2025-09-05 15:07:09] [Rank 0] Group 13 Loss: 5.5807
+[2025-09-05 15:07:09] [Rank 0] Group 14 Loss: 5.5119
+[2025-09-05 15:07:09] [Rank 0] Group 15 Loss: 5.6322
+[2025-09-05 15:07:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 15:07:09] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 15:07:09] [Rank 0] Group 12 FTA: 0.8300
+[2025-09-05 15:07:09] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-05 15:07:09] [Rank 0] Group 14 FTA: 0.2200
+[2025-09-05 15:07:09] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-05 15:07:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:07:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:07:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:07:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:07:11] [Rank 0] step:6001/10000 train_time:239095ms step_avg:39.84ms
+[2025-09-05 15:07:11] [Rank 0] step:6021/10000 train_time:239609ms step_avg:39.80ms
+[2025-09-05 15:07:12] [Rank 0] step:6041/10000 train_time:240241ms step_avg:39.77ms
+[2025-09-05 15:07:13] [Rank 0] step:6061/10000 train_time:240899ms step_avg:39.75ms
+[2025-09-05 15:07:13] [Rank 0] step:6081/10000 train_time:241556ms step_avg:39.72ms
+[2025-09-05 15:07:14] [Rank 0] step:6101/10000 train_time:242213ms step_avg:39.70ms
+[2025-09-05 15:07:15] [Rank 0] step:6121/10000 train_time:242871ms step_avg:39.68ms
+[2025-09-05 15:07:15] [Rank 0] step:6141/10000 train_time:243536ms step_avg:39.66ms
+[2025-09-05 15:07:16] [Rank 0] step:6161/10000 train_time:244194ms step_avg:39.64ms
+[2025-09-05 15:07:17] [Rank 0] step:6181/10000 train_time:244852ms step_avg:39.61ms
+[2025-09-05 15:07:17] [Rank 0] step:6201/10000 train_time:245509ms step_avg:39.59ms
+[2025-09-05 15:07:18] [Rank 0] step:6221/10000 train_time:246167ms step_avg:39.57ms
+[2025-09-05 15:07:19] [Rank 0] step:6241/10000 train_time:246825ms step_avg:39.55ms
+[2025-09-05 15:07:19] [Rank 0] step:6261/10000 train_time:247483ms step_avg:39.53ms
+[2025-09-05 15:07:20] [Rank 0] step:6281/10000 train_time:248141ms step_avg:39.51ms
+[2025-09-05 15:07:21] [Rank 0] step:6301/10000 train_time:248799ms step_avg:39.49ms
+[2025-09-05 15:07:21] [Rank 0] step:6321/10000 train_time:249457ms step_avg:39.46ms
+[2025-09-05 15:07:22] [Rank 0] step:6341/10000 train_time:250115ms step_avg:39.44ms
+[2025-09-05 15:07:23] [Rank 0] step:6361/10000 train_time:250773ms step_avg:39.42ms
+[2025-09-05 15:07:23] [Rank 0] step:6381/10000 train_time:251430ms step_avg:39.40ms
+[2025-09-05 15:07:24] [Rank 0] step:6401/10000 train_time:252088ms step_avg:39.38ms
+[2025-09-05 15:07:25] [Rank 0] step:6421/10000 train_time:252746ms step_avg:39.36ms
+[2025-09-05 15:07:25] [Rank 0] step:6441/10000 train_time:253404ms step_avg:39.34ms
+[2025-09-05 15:07:26] [Rank 0] step:6461/10000 train_time:254062ms step_avg:39.32ms
+[2025-09-05 15:07:27] [Rank 0] step:6481/10000 train_time:254732ms step_avg:39.30ms
+[2025-09-05 15:07:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:07:28] [Rank 0] PRINT: step:6500/10000 train_loss:0.6784 val_loss:0.6681 train_time:255624ms step_avg:39.33ms
+[2025-09-05 15:07:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:07:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:08:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:08:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:08:49] [Rank 0] Total Loss: 5.3210
+[2025-09-05 15:08:49] [Rank 0] Total FTA (Unweighted): 0.8575
+[2025-09-05 15:08:49] [Rank 0] Total FTA (Weighted): 0.8575
+[2025-09-05 15:08:49] [Rank 0] Group 0 Loss: 5.2598
+[2025-09-05 15:08:49] [Rank 0] Group 1 Loss: 5.1332
+[2025-09-05 15:08:49] [Rank 0] Group 2 Loss: 4.9188
+[2025-09-05 15:08:49] [Rank 0] Group 3 Loss: 5.3187
+[2025-09-05 15:08:49] [Rank 0] Group 4 Loss: 5.3017
+[2025-09-05 15:08:49] [Rank 0] Group 5 Loss: 5.1911
+[2025-09-05 15:08:49] [Rank 0] Group 6 Loss: 5.2037
+[2025-09-05 15:08:49] [Rank 0] Group 7 Loss: 5.2734
+[2025-09-05 15:08:49] [Rank 0] Group 8 Loss: 5.3169
+[2025-09-05 15:08:49] [Rank 0] Group 9 Loss: 5.3102
+[2025-09-05 15:08:49] [Rank 0] Group 10 Loss: 5.4114
+[2025-09-05 15:08:49] [Rank 0] Group 11 Loss: 5.3803
+[2025-09-05 15:08:49] [Rank 0] Group 12 Loss: 5.4407
+[2025-09-05 15:08:49] [Rank 0] Group 13 Loss: 5.5450
+[2025-09-05 15:08:49] [Rank 0] Group 14 Loss: 5.5199
+[2025-09-05 15:08:49] [Rank 0] Group 15 Loss: 5.6113
+[2025-09-05 15:08:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 15:08:49] [Rank 0] Group 12 FTA: 0.9500
+[2025-09-05 15:08:49] [Rank 0] Group 13 FTA: 0.4300
+[2025-09-05 15:08:49] [Rank 0] Group 14 FTA: 0.2200
+[2025-09-05 15:08:49] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 15:08:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:08:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:08:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:08:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:08:50] [Rank 0] step:6501/10000 train_time:255631ms step_avg:39.32ms
+[2025-09-05 15:08:51] [Rank 0] step:6521/10000 train_time:256084ms step_avg:39.27ms
+[2025-09-05 15:08:52] [Rank 0] step:6541/10000 train_time:256744ms step_avg:39.25ms
+[2025-09-05 15:08:52] [Rank 0] step:6561/10000 train_time:257403ms step_avg:39.23ms
+[2025-09-05 15:08:53] [Rank 0] step:6581/10000 train_time:258065ms step_avg:39.21ms
+[2025-09-05 15:08:54] [Rank 0] step:6601/10000 train_time:258725ms step_avg:39.19ms
+[2025-09-05 15:08:54] [Rank 0] step:6621/10000 train_time:259384ms step_avg:39.18ms
+[2025-09-05 15:08:55] [Rank 0] step:6641/10000 train_time:260043ms step_avg:39.16ms
+[2025-09-05 15:08:56] [Rank 0] step:6661/10000 train_time:260703ms step_avg:39.14ms
+[2025-09-05 15:08:56] [Rank 0] step:6681/10000 train_time:261363ms step_avg:39.12ms
+[2025-09-05 15:08:57] [Rank 0] step:6701/10000 train_time:262022ms step_avg:39.10ms
+[2025-09-05 15:08:58] [Rank 0] step:6721/10000 train_time:262681ms step_avg:39.08ms
+[2025-09-05 15:08:58] [Rank 0] step:6741/10000 train_time:263341ms step_avg:39.07ms
+[2025-09-05 15:08:59] [Rank 0] step:6761/10000 train_time:263999ms step_avg:39.05ms
+[2025-09-05 15:09:00] [Rank 0] step:6781/10000 train_time:264660ms step_avg:39.03ms
+[2025-09-05 15:09:00] [Rank 0] step:6801/10000 train_time:265320ms step_avg:39.01ms
+[2025-09-05 15:09:01] [Rank 0] step:6821/10000 train_time:265980ms step_avg:38.99ms
+[2025-09-05 15:09:02] [Rank 0] step:6841/10000 train_time:266641ms step_avg:38.98ms
+[2025-09-05 15:09:02] [Rank 0] step:6861/10000 train_time:267302ms step_avg:38.96ms
+[2025-09-05 15:09:03] [Rank 0] step:6881/10000 train_time:267959ms step_avg:38.94ms
+[2025-09-05 15:09:04] [Rank 0] step:6901/10000 train_time:268619ms step_avg:38.92ms
+[2025-09-05 15:09:04] [Rank 0] step:6921/10000 train_time:269450ms step_avg:38.93ms
+[2025-09-05 15:09:05] [Rank 0] step:6941/10000 train_time:270108ms step_avg:38.91ms
+[2025-09-05 15:09:06] [Rank 0] step:6961/10000 train_time:270767ms step_avg:38.90ms
+[2025-09-05 15:09:06] [Rank 0] step:6981/10000 train_time:271427ms step_avg:38.88ms
+[2025-09-05 15:09:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:09:08] [Rank 0] PRINT: step:7000/10000 train_loss:0.6725 val_loss:0.6636 train_time:272488ms step_avg:38.93ms
+[2025-09-05 15:09:08] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:09:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:10:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:10:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:10:29] [Rank 0] Total Loss: 5.3300
+[2025-09-05 15:10:29] [Rank 0] Total FTA (Unweighted): 0.8619
+[2025-09-05 15:10:29] [Rank 0] Total FTA (Weighted): 0.8619
+[2025-09-05 15:10:29] [Rank 0] Group 0 Loss: 5.3423
+[2025-09-05 15:10:29] [Rank 0] Group 1 Loss: 5.1255
+[2025-09-05 15:10:29] [Rank 0] Group 2 Loss: 4.9093
+[2025-09-05 15:10:29] [Rank 0] Group 3 Loss: 5.3070
+[2025-09-05 15:10:29] [Rank 0] Group 4 Loss: 5.3112
+[2025-09-05 15:10:29] [Rank 0] Group 5 Loss: 5.2148
+[2025-09-05 15:10:29] [Rank 0] Group 6 Loss: 5.2261
+[2025-09-05 15:10:29] [Rank 0] Group 7 Loss: 5.3066
+[2025-09-05 15:10:29] [Rank 0] Group 8 Loss: 5.3448
+[2025-09-05 15:10:29] [Rank 0] Group 9 Loss: 5.3303
+[2025-09-05 15:10:29] [Rank 0] Group 10 Loss: 5.4184
+[2025-09-05 15:10:29] [Rank 0] Group 11 Loss: 5.3749
+[2025-09-05 15:10:29] [Rank 0] Group 12 Loss: 5.4229
+[2025-09-05 15:10:29] [Rank 0] Group 13 Loss: 5.5603
+[2025-09-05 15:10:29] [Rank 0] Group 14 Loss: 5.4790
+[2025-09-05 15:10:29] [Rank 0] Group 15 Loss: 5.6058
+[2025-09-05 15:10:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 15:10:29] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 15:10:29] [Rank 0] Group 12 FTA: 0.9500
+[2025-09-05 15:10:29] [Rank 0] Group 13 FTA: 0.5100
+[2025-09-05 15:10:29] [Rank 0] Group 14 FTA: 0.2300
+[2025-09-05 15:10:29] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 15:10:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:10:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:10:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:10:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:10:30] [Rank 0] step:7001/10000 train_time:272497ms step_avg:38.92ms
+[2025-09-05 15:10:31] [Rank 0] step:7021/10000 train_time:272945ms step_avg:38.88ms
+[2025-09-05 15:10:32] [Rank 0] step:7041/10000 train_time:273603ms step_avg:38.86ms
+[2025-09-05 15:10:32] [Rank 0] step:7061/10000 train_time:274262ms step_avg:38.84ms
+[2025-09-05 15:10:33] [Rank 0] step:7081/10000 train_time:274919ms step_avg:38.82ms
+[2025-09-05 15:10:34] [Rank 0] step:7101/10000 train_time:275577ms step_avg:38.81ms
[Rank 0] step:7101/10000 train_time:275577ms step_avg:38.81ms +[2025-09-05 15:10:34] [Rank 0] step:7121/10000 train_time:276236ms step_avg:38.79ms +[2025-09-05 15:10:34] [Rank 0] step:7121/10000 train_time:276236ms step_avg:38.79ms +[2025-09-05 15:10:35] [Rank 0] step:7141/10000 train_time:276895ms step_avg:38.78ms +[2025-09-05 15:10:35] [Rank 0] step:7141/10000 train_time:276895ms step_avg:38.78ms +[2025-09-05 15:10:36] [Rank 0] step:7161/10000 train_time:277553ms step_avg:38.76ms +[2025-09-05 15:10:36] [Rank 0] step:7161/10000 train_time:277553ms step_avg:38.76ms +[2025-09-05 15:10:36] [Rank 0] step:7181/10000 train_time:278211ms step_avg:38.74ms +[2025-09-05 15:10:36] [Rank 0] step:7181/10000 train_time:278211ms step_avg:38.74ms +[2025-09-05 15:10:37] [Rank 0] step:7201/10000 train_time:278869ms step_avg:38.73ms +[2025-09-05 15:10:37] [Rank 0] step:7201/10000 train_time:278869ms step_avg:38.73ms +[2025-09-05 15:10:38] [Rank 0] step:7221/10000 train_time:279528ms step_avg:38.71ms +[2025-09-05 15:10:38] [Rank 0] step:7221/10000 train_time:279528ms step_avg:38.71ms +[2025-09-05 15:10:38] [Rank 0] step:7241/10000 train_time:280186ms step_avg:38.69ms +[2025-09-05 15:10:38] [Rank 0] step:7241/10000 train_time:280186ms step_avg:38.69ms +[2025-09-05 15:10:39] [Rank 0] step:7261/10000 train_time:280845ms step_avg:38.68ms +[2025-09-05 15:10:39] [Rank 0] step:7261/10000 train_time:280845ms step_avg:38.68ms +[2025-09-05 15:10:40] [Rank 0] step:7281/10000 train_time:281504ms step_avg:38.66ms +[2025-09-05 15:10:40] [Rank 0] step:7281/10000 train_time:281504ms step_avg:38.66ms +[2025-09-05 15:10:40] [Rank 0] step:7301/10000 train_time:282161ms step_avg:38.65ms +[2025-09-05 15:10:40] [Rank 0] step:7301/10000 train_time:282161ms step_avg:38.65ms +[2025-09-05 15:10:41] [Rank 0] step:7321/10000 train_time:282819ms step_avg:38.63ms +[2025-09-05 15:10:41] [Rank 0] step:7321/10000 train_time:282819ms step_avg:38.63ms +[2025-09-05 15:10:42] [Rank 0] step:7341/10000 train_time:283477ms step_avg:38.62ms +[2025-09-05 15:10:42] [Rank 0] step:7341/10000 train_time:283477ms step_avg:38.62ms +[2025-09-05 15:10:42] [Rank 0] step:7361/10000 train_time:284136ms step_avg:38.60ms +[2025-09-05 15:10:42] [Rank 0] step:7361/10000 train_time:284136ms step_avg:38.60ms +[2025-09-05 15:10:43] [Rank 0] step:7381/10000 train_time:284794ms step_avg:38.58ms +[2025-09-05 15:10:43] [Rank 0] step:7381/10000 train_time:284794ms step_avg:38.58ms +[2025-09-05 15:10:44] [Rank 0] step:7401/10000 train_time:285452ms step_avg:38.57ms +[2025-09-05 15:10:44] [Rank 0] step:7401/10000 train_time:285452ms step_avg:38.57ms +[2025-09-05 15:10:44] [Rank 0] step:7421/10000 train_time:286110ms step_avg:38.55ms +[2025-09-05 15:10:44] [Rank 0] step:7421/10000 train_time:286110ms step_avg:38.55ms +[2025-09-05 15:10:45] [Rank 0] step:7441/10000 train_time:286768ms step_avg:38.54ms +[2025-09-05 15:10:45] [Rank 0] step:7441/10000 train_time:286768ms step_avg:38.54ms +[2025-09-05 15:10:46] [Rank 0] step:7461/10000 train_time:287426ms step_avg:38.52ms +[2025-09-05 15:10:46] [Rank 0] step:7461/10000 train_time:287426ms step_avg:38.52ms +[2025-09-05 15:10:46] [Rank 0] step:7481/10000 train_time:288084ms step_avg:38.51ms +[2025-09-05 15:10:46] [Rank 0] step:7481/10000 train_time:288084ms step_avg:38.51ms +[2025-09-05 15:10:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:10:47] [Rank 0] PRINT: step:7500/10000 train_loss:0.6667 val_loss:0.6577 train_time:288976ms step_avg:38.53ms
+[2025-09-05 15:10:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:10:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:12:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:12:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:12:09] [Rank 0] Total Loss: 5.3157
+[2025-09-05 15:12:09] [Rank 0] Total FTA (Unweighted): 0.8750
+[2025-09-05 15:12:09] [Rank 0] Total FTA (Weighted): 0.8750
+[2025-09-05 15:12:09] [Rank 0] Group 0 Loss: 5.3669
+[2025-09-05 15:12:09] [Rank 0] Group 1 Loss: 5.0820
+[2025-09-05 15:12:09] [Rank 0] Group 2 Loss: 4.8992
+[2025-09-05 15:12:09] [Rank 0] Group 3 Loss: 5.3254
+[2025-09-05 15:12:09] [Rank 0] Group 4 Loss: 5.3091
+[2025-09-05 15:12:09] [Rank 0] Group 5 Loss: 5.1644
+[2025-09-05 15:12:09] [Rank 0] Group 6 Loss: 5.2113
+[2025-09-05 15:12:09] [Rank 0] Group 7 Loss: 5.2744
+[2025-09-05 15:12:09] [Rank 0] Group 8 Loss: 5.2956
+[2025-09-05 15:12:09] [Rank 0] Group 9 Loss: 5.2811
+[2025-09-05 15:12:09] [Rank 0] Group 10 Loss: 5.4326
+[2025-09-05 15:12:09] [Rank 0] Group 11 Loss: 5.3724
+[2025-09-05 15:12:09] [Rank 0] Group 12 Loss: 5.4079
+[2025-09-05 15:12:09] [Rank 0] Group 13 Loss: 5.5361
+[2025-09-05 15:12:09] [Rank 0] Group 14 Loss: 5.5029
+[2025-09-05 15:12:09] [Rank 0] Group 15 Loss: 5.5892
+[2025-09-05 15:12:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 15:12:09] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-05 15:12:09] [Rank 0] Group 13 FTA: 0.6200
+[2025-09-05 15:12:09] [Rank 0] Group 14 FTA: 0.2700
+[2025-09-05 15:12:09] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 15:12:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:12:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:12:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:12:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:12:10] [Rank 0] step:7501/10000 train_time:288983ms step_avg:38.53ms
+[2025-09-05 15:12:11] [Rank 0] step:7521/10000 train_time:289421ms step_avg:38.48ms
+[2025-09-05 15:12:12] [Rank 0] step:7541/10000 train_time:290080ms step_avg:38.47ms
+[2025-09-05 15:12:12] [Rank 0] step:7561/10000 train_time:290738ms step_avg:38.45ms
+[2025-09-05 15:12:13] [Rank 0] step:7581/10000 train_time:291397ms step_avg:38.44ms
+[2025-09-05 15:12:14] [Rank 0] step:7601/10000 train_time:292055ms step_avg:38.42ms
+[2025-09-05 15:12:14] [Rank 0] step:7621/10000 train_time:292923ms step_avg:38.44ms
+[2025-09-05 15:12:16] [Rank 0] step:7641/10000 train_time:293582ms step_avg:38.42ms
+[2025-09-05 15:12:16] [Rank 0] step:7661/10000 train_time:294706ms step_avg:38.47ms
+[2025-09-05 15:12:17] [Rank 0] step:7681/10000 train_time:295493ms step_avg:38.47ms
+[2025-09-05 15:12:18] [Rank 0] step:7701/10000 train_time:296151ms step_avg:38.46ms
+[2025-09-05 15:12:18] [Rank 0] step:7721/10000 train_time:296810ms step_avg:38.44ms
+[2025-09-05 15:12:19] [Rank 0] step:7741/10000 train_time:297471ms step_avg:38.43ms
+[2025-09-05 15:12:20] [Rank 0] step:7761/10000 train_time:298130ms step_avg:38.41ms
+[2025-09-05 15:12:20] [Rank 0] step:7781/10000 train_time:298789ms step_avg:38.40ms
+[2025-09-05 15:12:21] [Rank 0] step:7801/10000 train_time:299448ms step_avg:38.39ms
+[2025-09-05 15:12:22] [Rank 0] step:7821/10000 train_time:300108ms step_avg:38.37ms
+[2025-09-05 15:12:22] [Rank 0] step:7841/10000 train_time:300767ms step_avg:38.36ms
+[2025-09-05 15:12:23] [Rank 0] step:7861/10000 train_time:301426ms step_avg:38.34ms
+[2025-09-05 15:12:24] [Rank 0] step:7881/10000 train_time:302084ms step_avg:38.33ms
+[2025-09-05 15:12:24] [Rank 0] step:7901/10000 train_time:302743ms step_avg:38.32ms
+[2025-09-05 15:12:25] [Rank 0] step:7921/10000 train_time:303402ms step_avg:38.30ms
+[2025-09-05 15:12:26] [Rank 0] step:7941/10000 train_time:304061ms step_avg:38.29ms
+[2025-09-05 15:12:26] [Rank 0] step:7961/10000 train_time:304719ms step_avg:38.28ms
+[2025-09-05 15:12:27] [Rank 0] step:7981/10000 train_time:305378ms step_avg:38.26ms
+[2025-09-05 15:12:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:12:28] [Rank 0] PRINT: step:8000/10000 train_loss:0.6614 val_loss:0.6520 train_time:306271ms step_avg:38.28ms
+[2025-09-05 15:12:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:12:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:13:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:13:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:13:49] [Rank 0] Total Loss: 5.2536
+[2025-09-05 15:13:49] [Rank 0] Total FTA (Unweighted): 0.8831
+[2025-09-05 15:13:49] [Rank 0] Total FTA (Weighted): 0.8831
+[2025-09-05 15:13:49] [Rank 0] Group 0 Loss: 5.2386
+[2025-09-05 15:13:49] [Rank 0] Group 1 Loss: 4.9558
+[2025-09-05 15:13:49] [Rank 0] Group 2 Loss: 4.9160
+[2025-09-05 15:13:49] [Rank 0] Group 3 Loss: 5.2527
+[2025-09-05 15:13:49] [Rank 0] Group 4 Loss: 5.2699
+[2025-09-05 15:13:49] [Rank 0] Group 5 Loss: 5.1192
+[2025-09-05 15:13:49] [Rank 0] Group 6 Loss: 5.1563
+[2025-09-05 15:13:49] [Rank 0] Group 7 Loss: 5.2223
+[2025-09-05 15:13:49] [Rank 0] Group 8 Loss: 5.2544
+[2025-09-05 15:13:49] [Rank 0] Group 9 Loss: 5.2722
+[2025-09-05 15:13:49] [Rank 0] Group 10 Loss: 5.3831
+[2025-09-05 15:13:49] [Rank 0] Group 11 Loss: 5.2994
+[2025-09-05 15:13:49] [Rank 0] Group 12 Loss: 5.3296
+[2025-09-05 15:13:49] [Rank 0] Group 13 Loss: 5.4732
+[2025-09-05 15:13:49] [Rank 0] Group 14 Loss: 5.4259
+[2025-09-05 15:13:49] [Rank 0] Group 15 Loss: 5.4897
+[2025-09-05 15:13:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 15:13:49] [Rank 0] Group 12 FTA: 0.9800
+[2025-09-05 15:13:49] [Rank 0] Group 13 FTA: 0.7000
+[2025-09-05 15:13:49] [Rank 0] Group 14 FTA: 0.2800
+[2025-09-05 15:13:49] [Rank 0] Group 15 FTA: 0.1700
+[2025-09-05 15:13:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:13:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:13:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:13:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:13:51] [Rank 0] step:8001/10000 train_time:306278ms step_avg:38.28ms
+[2025-09-05 15:13:51] [Rank 0] step:8021/10000 train_time:306791ms step_avg:38.25ms
+[2025-09-05 15:13:52] [Rank 0] step:8041/10000 train_time:307421ms step_avg:38.23ms
+[2025-09-05 15:13:53] [Rank 0] step:8061/10000 train_time:308078ms step_avg:38.22ms
+[2025-09-05 15:13:53] [Rank 0] step:8081/10000 train_time:308735ms step_avg:38.21ms
+[2025-09-05 15:13:54] [Rank 0] step:8101/10000 train_time:309393ms step_avg:38.19ms
+[2025-09-05 15:13:55] [Rank 0] step:8121/10000 train_time:310050ms step_avg:38.18ms
+[2025-09-05 15:13:55] [Rank 0] step:8141/10000 train_time:310708ms step_avg:38.17ms
+[2025-09-05 15:13:56] [Rank 0] step:8161/10000 train_time:311366ms step_avg:38.15ms
+[2025-09-05 15:13:57] [Rank 0] step:8181/10000 train_time:312023ms step_avg:38.14ms
+[2025-09-05 15:13:57] [Rank 0] step:8201/10000 train_time:312681ms step_avg:38.13ms
+[2025-09-05 15:13:58] [Rank 0] step:8221/10000 train_time:313338ms step_avg:38.11ms
+[2025-09-05 15:13:59] [Rank 0] step:8241/10000 train_time:313996ms step_avg:38.10ms
+[2025-09-05 15:13:59] [Rank 0] step:8261/10000 train_time:314653ms step_avg:38.09ms
+[2025-09-05 15:14:00] [Rank 0] step:8281/10000 train_time:315311ms step_avg:38.08ms
+[2025-09-05 15:14:00] [Rank 0] step:8301/10000 train_time:315968ms step_avg:38.06ms
+[2025-09-05 15:14:01] [Rank 0] step:8321/10000 train_time:316625ms step_avg:38.05ms
+[2025-09-05 15:14:02] [Rank 0] step:8341/10000 train_time:317282ms step_avg:38.04ms
+[2025-09-05 15:14:02] [Rank 0] step:8361/10000 train_time:317941ms step_avg:38.03ms
+[2025-09-05 15:14:03] [Rank 0] step:8381/10000 train_time:318599ms step_avg:38.01ms
+[2025-09-05 15:14:04] [Rank 0] step:8401/10000 train_time:319256ms step_avg:38.00ms
+[2025-09-05 15:14:04] [Rank 0] step:8421/10000 train_time:319914ms step_avg:37.99ms
+[2025-09-05 15:14:05] [Rank 0] step:8441/10000 train_time:320572ms step_avg:37.98ms
+[2025-09-05 15:14:06] [Rank 0] step:8461/10000 train_time:321229ms step_avg:37.97ms
+[2025-09-05 15:14:06] [Rank 0] step:8481/10000 train_time:321888ms step_avg:37.95ms
+[2025-09-05 15:14:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:14:08] [Rank 0] PRINT: step:8500/10000 train_loss:0.6564 val_loss:0.6479 train_time:322779ms step_avg:37.97ms
+[2025-09-05 15:14:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:14:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:15:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:15:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:15:28] [Rank 0] Total Loss: 5.2916
+[2025-09-05 15:15:28] [Rank 0] Total FTA (Unweighted): 0.8900
+[2025-09-05 15:15:28] [Rank 0] Total FTA (Weighted): 0.8900
+[2025-09-05 15:15:28] [Rank 0] Group 0 Loss: 5.3924
+[2025-09-05 15:15:28] [Rank 0] Group 1 Loss: 5.0603
+[2025-09-05 15:15:28] [Rank 0] Group 2 Loss: 4.9049
+[2025-09-05 15:15:28] [Rank 0] Group 3 Loss: 5.2205
+[2025-09-05 15:15:28] [Rank 0] Group 4 Loss: 5.2658
+[2025-09-05 15:15:28] [Rank 0] Group 5 Loss: 5.1322
+[2025-09-05 15:15:28] [Rank 0] Group 6 Loss: 5.2006
+[2025-09-05 15:15:28] [Rank 0] Group 7 Loss: 5.2835
+[2025-09-05 15:15:28] [Rank 0] Group 8 Loss: 5.2906
+[2025-09-05 15:15:28] [Rank 0] Group 9 Loss: 5.2922
+[2025-09-05 15:15:28] [Rank 0] Group 10 Loss: 5.3931
+[2025-09-05 15:15:28] [Rank 0] Group 11 Loss: 5.3330
+[2025-09-05 15:15:28] [Rank 0] Group 12 Loss: 5.3828
+[2025-09-05 15:15:28] [Rank 0] Group 13 Loss: 5.5077
+[2025-09-05 15:15:28] [Rank 0] Group 14 Loss: 5.4559
+[2025-09-05 15:15:28] [Rank 0] Group 15 Loss: 5.5500
+[2025-09-05 15:15:28] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 15:15:28] [Rank 0] Group 13 FTA: 0.8200
+[2025-09-05 15:15:28] [Rank 0] Group 14 FTA: 0.2400
+[2025-09-05 15:15:28] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-05 15:15:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:15:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:15:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:15:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:15:30] [Rank 0] step:8501/10000 train_time:322788ms step_avg:37.97ms
+[2025-09-05 15:15:30] [Rank 0] step:8521/10000 train_time:323228ms step_avg:37.93ms
+[2025-09-05 15:15:31] [Rank 0] step:8541/10000 train_time:323888ms step_avg:37.92ms
+[2025-09-05 15:15:32] [Rank 0] step:8561/10000 train_time:324547ms step_avg:37.91ms
+[2025-09-05 15:15:32] [Rank 0] step:8581/10000 train_time:325206ms step_avg:37.90ms
+[2025-09-05 15:15:33] [Rank 0] step:8601/10000 train_time:325865ms step_avg:37.89ms
+[2025-09-05 15:15:34] [Rank 0] step:8621/10000 train_time:326523ms step_avg:37.88ms
+[2025-09-05 15:15:34] [Rank 0] step:8641/10000 train_time:327182ms step_avg:37.86ms
+[2025-09-05 15:15:35] [Rank 0] step:8661/10000 train_time:327841ms step_avg:37.85ms
+[2025-09-05 15:15:36] [Rank 0] step:8681/10000 train_time:328500ms step_avg:37.84ms
+[2025-09-05 15:15:36] [Rank 0] step:8701/10000 train_time:329159ms step_avg:37.83ms
+[2025-09-05 15:15:37] [Rank 0] step:8721/10000 train_time:329818ms step_avg:37.82ms
+[2025-09-05 15:15:38] [Rank 0] step:8741/10000 train_time:330476ms step_avg:37.81ms
+[2025-09-05 15:15:38] [Rank 0] step:8761/10000 train_time:331137ms step_avg:37.80ms
+[2025-09-05 15:15:39] [Rank 0] step:8781/10000 train_time:331797ms step_avg:37.79ms
+[2025-09-05 15:15:40] [Rank 0] step:8801/10000 train_time:332456ms step_avg:37.77ms
+[2025-09-05 15:15:40] [Rank 0] step:8821/10000 train_time:333115ms step_avg:37.76ms
+[2025-09-05 15:15:41] [Rank 0] step:8841/10000 train_time:333774ms step_avg:37.75ms
+[2025-09-05 15:15:42] [Rank 0] step:8861/10000 train_time:334434ms step_avg:37.74ms
+[2025-09-05 15:15:42] [Rank 0] step:8881/10000 train_time:335093ms step_avg:37.73ms
+[2025-09-05 15:15:43] [Rank 0] step:8901/10000 train_time:335751ms step_avg:37.72ms
+[2025-09-05 15:15:43] [Rank 0] step:8921/10000 train_time:336410ms step_avg:37.71ms
+[2025-09-05 15:15:44] [Rank 0] step:8941/10000 train_time:337069ms step_avg:37.70ms
+[2025-09-05 15:15:45] [Rank 0] step:8961/10000 train_time:337728ms step_avg:37.69ms
+[2025-09-05 15:15:45] [Rank 0] step:8981/10000 train_time:338387ms step_avg:37.68ms
+[2025-09-05 15:15:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:15:46] [Rank 0] PRINT: step:9000/10000 train_loss:0.6516 val_loss:0.6435 train_time:339281ms step_avg:37.70ms
+[2025-09-05 15:15:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:15:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:17:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:17:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:17:08] [Rank 0] Total Loss: 5.3238
+[2025-09-05 15:17:08] [Rank 0] Total FTA (Unweighted): 0.9031
+[2025-09-05 15:17:08] [Rank 0] Total FTA (Weighted): 0.9031
+[2025-09-05 15:17:08] [Rank 0] Group 0 Loss: 5.3520
+[2025-09-05 15:17:08] [Rank 0] Group 1 Loss: 5.1081
+[2025-09-05 15:17:08] [Rank 0] Group 2 Loss: 4.9210
+[2025-09-05 15:17:08] [Rank 0] Group 3 Loss: 5.3070
+[2025-09-05 15:17:08] [Rank 0] Group 4 Loss: 5.3191
+[2025-09-05 15:17:08] [Rank 0] Group 5 Loss: 5.1885
+[2025-09-05 15:17:08] [Rank 0] Group 6 Loss: 5.2439
+[2025-09-05 15:17:08] [Rank 0] Group 7 Loss: 5.2985
+[2025-09-05 15:17:08] [Rank 0] Group 8 Loss: 5.3178
+[2025-09-05 15:17:08] [Rank 0] Group 9 Loss: 5.3132
+[2025-09-05 15:17:08] [Rank 0] Group 10 Loss: 5.4346
+[2025-09-05 15:17:08] [Rank 0] Group 11 Loss: 5.3770
+[2025-09-05 15:17:08] [Rank 0] Group 12 Loss: 5.4265
+[2025-09-05 15:17:08] [Rank 0] Group 13 Loss: 5.5462
+[2025-09-05 15:17:08] [Rank 0] Group 14 Loss: 5.4762
+[2025-09-05 15:17:08] [Rank 0] Group 15 Loss: 5.5516
+[2025-09-05 15:17:08] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 12 FTA: 1.0000
+[2025-09-05 15:17:08] [Rank 0] Group 13 FTA: 0.8700
+[2025-09-05 15:17:08] [Rank 0] Group 14 FTA: 0.3800
+[2025-09-05 15:17:08] [Rank 0] Group 15 FTA: 0.2000
+[2025-09-05 15:17:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:17:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:17:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:17:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:17:09] [Rank 0] step:9001/10000 train_time:339288ms step_avg:37.69ms
+[2025-09-05 15:17:10] [Rank 0] step:9021/10000 train_time:339741ms step_avg:37.66ms
+[2025-09-05 15:17:11] [Rank 0] step:9041/10000 train_time:340398ms step_avg:37.65ms
+[2025-09-05 15:17:11] [Rank 0] step:9061/10000 train_time:341055ms step_avg:37.64ms
+[2025-09-05 15:17:12] [Rank 0] step:9081/10000 train_time:341712ms step_avg:37.63ms
+[2025-09-05 15:17:13] [Rank 0] step:9101/10000 train_time:342369ms step_avg:37.62ms
+[2025-09-05 15:17:13] [Rank 0] step:9121/10000 train_time:343026ms step_avg:37.61ms
+[2025-09-05 15:17:14] [Rank 0] step:9141/10000 train_time:343683ms step_avg:37.60ms
+[2025-09-05 15:17:15] [Rank 0] step:9161/10000 train_time:344340ms step_avg:37.59ms
+[2025-09-05 15:17:15] [Rank 0] step:9181/10000 train_time:344998ms step_avg:37.58ms
+[2025-09-05 15:17:16] [Rank 0] step:9201/10000 train_time:345656ms step_avg:37.57ms
+[2025-09-05 15:17:17] [Rank 0] step:9221/10000 train_time:346313ms step_avg:37.56ms
+[2025-09-05 15:17:17] [Rank 0] step:9241/10000 train_time:346971ms step_avg:37.55ms
+[2025-09-05 15:17:18] [Rank 0] step:9261/10000 train_time:347628ms step_avg:37.54ms
+[2025-09-05 15:17:19] [Rank 0] step:9281/10000 train_time:348286ms step_avg:37.53ms
+[2025-09-05 15:17:19] [Rank 0] step:9301/10000 train_time:348944ms step_avg:37.52ms
+[2025-09-05 15:17:20] [Rank 0] step:9321/10000 train_time:349601ms step_avg:37.51ms
+[2025-09-05 15:17:21] [Rank 0] step:9341/10000 train_time:350259ms step_avg:37.50ms
+[2025-09-05 15:17:21] [Rank 0] step:9361/10000 train_time:350918ms step_avg:37.49ms
+[2025-09-05 15:17:22] [Rank 0] step:9381/10000 train_time:351576ms step_avg:37.48ms
+[2025-09-05 15:17:23] [Rank 0] step:9401/10000 train_time:352233ms step_avg:37.47ms
+[2025-09-05 15:17:23] [Rank 0] step:9421/10000 train_time:352891ms step_avg:37.46ms
+[2025-09-05 15:17:24] [Rank 0] step:9441/10000 train_time:353549ms step_avg:37.45ms
+[2025-09-05 15:17:25] [Rank 0] step:9461/10000 train_time:354207ms step_avg:37.44ms
+[2025-09-05 15:17:25] [Rank 0] step:9481/10000 train_time:354864ms step_avg:37.43ms
+[2025-09-05 15:17:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:17:26] [Rank 0] PRINT: step:9500/10000 train_loss:0.6471 val_loss:0.6391 train_time:355764ms step_avg:37.45ms
+[2025-09-05 15:17:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:17:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:18:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:18:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:18:48] [Rank 0] Total Loss: 5.2939
+[2025-09-05 15:18:48] [Rank 0] Total FTA (Unweighted): 0.9056
+[2025-09-05 15:18:48] [Rank 0] Total FTA (Weighted): 0.9056
+[2025-09-05 15:18:48] [Rank 0] Group 0 Loss: 5.3812
+[2025-09-05 15:18:48] [Rank 0] Group 1 Loss: 5.0858
+[2025-09-05 15:18:48] [Rank 0] Group 2 Loss: 4.9237
+[2025-09-05 15:18:48] [Rank 0] Group 3 Loss: 5.2711
+[2025-09-05 15:18:48] [Rank 0] Group 4 Loss: 5.2913
+[2025-09-05 15:18:48] [Rank 0] Group 5 Loss: 5.1199
+[2025-09-05 15:18:48] [Rank 0] Group 6 Loss: 5.2220
+[2025-09-05 15:18:48] [Rank 0] Group 7 Loss: 5.2773
+[2025-09-05 15:18:48] [Rank 0] Group 8 Loss: 5.2781
+[2025-09-05 15:18:48] [Rank 0] Group 9 Loss: 5.2773
+[2025-09-05 15:18:48] [Rank 0] Group 10 Loss: 5.3905
+[2025-09-05 15:18:48] [Rank 0] Group 11 Loss: 5.3450
+[2025-09-05 15:18:48] [Rank 0] Group 12 Loss: 5.4025
+[2025-09-05 15:18:48] [Rank 0] Group 13 Loss: 5.4827
+[2025-09-05 15:18:48] [Rank 0] Group 14 Loss: 5.4285
+[2025-09-05 15:18:48] [Rank 0] Group 15 Loss: 5.5250
+[2025-09-05 15:18:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 10 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 11 FTA: 1.0000
+[2025-09-05 15:18:48] [Rank 0] Group 12 FTA: 0.9900
+[2025-09-05 15:18:48] [Rank 0] Group 13 FTA: 0.8900
+[2025-09-05 15:18:48] [Rank 0] Group 14 FTA: 0.4300
+[2025-09-05 15:18:48] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-05 15:18:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png
+[2025-09-05 15:18:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png
+[2025-09-05 15:18:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png
+[2025-09-05 15:18:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png
+[2025-09-05 15:18:49] [Rank 0] step:9501/10000 train_time:355771ms step_avg:37.45ms
+[2025-09-05 15:18:50] [Rank 0] step:9521/10000 train_time:356209ms step_avg:37.41ms
+[2025-09-05 15:18:51] [Rank 0] step:9541/10000 train_time:356868ms step_avg:37.40ms
+[2025-09-05 15:18:51] [Rank 0] step:9561/10000 train_time:357526ms step_avg:37.39ms
+[2025-09-05 15:18:52] [Rank 0] step:9581/10000 train_time:358187ms step_avg:37.39ms
+[2025-09-05 15:18:53] [Rank 0] step:9601/10000 train_time:358846ms step_avg:37.38ms
+[2025-09-05 15:18:53] [Rank 0] step:9621/10000 train_time:359505ms step_avg:37.37ms
+[2025-09-05 15:18:54] [Rank 0] step:9641/10000 train_time:360163ms step_avg:37.36ms
+[2025-09-05 15:18:55] [Rank 0] step:9661/10000 train_time:361103ms step_avg:37.38ms
+[2025-09-05 15:18:55] [Rank 0] step:9681/10000 train_time:361762ms step_avg:37.37ms
+[2025-09-05 15:18:56] [Rank 0] step:9701/10000 train_time:362420ms step_avg:37.36ms
+[2025-09-05 15:18:57] [Rank 0] step:9721/10000 train_time:363078ms step_avg:37.35ms
+[2025-09-05 15:18:57] [Rank 0] step:9741/10000 train_time:363737ms step_avg:37.34ms
+[2025-09-05 15:18:58] [Rank 0] step:9761/10000 train_time:364395ms step_avg:37.33ms
+[2025-09-05 15:18:59] [Rank 0] step:9781/10000 train_time:365054ms step_avg:37.32ms
+[2025-09-05 15:18:59] [Rank 0] step:9801/10000 train_time:365712ms step_avg:37.31ms
+[2025-09-05 15:19:00] [Rank 0] step:9821/10000 train_time:366371ms step_avg:37.30ms
+[2025-09-05 15:19:01] [Rank 0] step:9841/10000 train_time:367029ms step_avg:37.30ms
+[2025-09-05 15:19:01] [Rank 0] step:9861/10000 train_time:367688ms step_avg:37.29ms
+[2025-09-05 15:19:02] [Rank 0] step:9881/10000 train_time:368348ms step_avg:37.28ms
+[2025-09-05 15:19:03] [Rank 0] step:9901/10000 train_time:369006ms step_avg:37.27ms
+[2025-09-05 15:19:03] [Rank 0] step:9921/10000 train_time:369665ms step_avg:37.26ms
+[2025-09-05 15:19:04] [Rank 0] step:9941/10000 train_time:370324ms step_avg:37.25ms
+[2025-09-05 15:19:05] [Rank 0] step:9961/10000 train_time:370983ms step_avg:37.24ms
+[2025-09-05 15:19:05] [Rank 0] step:9981/10000 train_time:371641ms step_avg:37.23ms
+[2025-09-05 15:19:06] [Rank 0] step:10000/10000 train_time:372272ms step_avg:37.23ms
+[2025-09-05 15:19:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:19:06] [Rank 0] PRINT: step:10000/10000 train_loss:0.6426 val_loss:0.6354 train_time:372545ms step_avg:37.25ms
+[2025-09-05 15:19:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:19:07] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:20:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:20:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:20:27] [Rank 0] Total Loss: 5.2854
+[2025-09-05 15:20:27] [Rank 0] Total FTA (Unweighted): 0.9150
+[2025-09-05 15:20:27] [Rank 0] Total FTA (Weighted): 0.9150
+[2025-09-05 15:20:27] [Rank 0] Group 0 Loss: 5.3383
+[2025-09-05 15:20:27] [Rank 0] Group 1 Loss: 5.0121
+[2025-09-05 15:20:27] [Rank 0] Group 2 Loss: 4.9672
+[2025-09-05 15:20:27] [Rank 0] Group 3 Loss: 5.2611
+[2025-09-05 15:20:27] [Rank 0] Group 4 Loss: 5.2810
+[2025-09-05 15:20:27] [Rank 0] Group 5 Loss: 5.1332
+[2025-09-05 15:20:27] [Rank 0] Group 6 Loss: 5.2171
+[2025-09-05 15:20:27] [Rank 0] Group 7 Loss: 5.2788
+[2025-09-05 15:20:27] [Rank 0] Group 8 Loss: 5.2842
+[2025-09-05 15:20:27] [Rank 0] Group 9 Loss: 5.2999
+[2025-09-05 15:20:27] [Rank 0] Group 10 Loss: 5.3944
+[2025-09-05 15:20:27] [Rank 0] Group 11 Loss: 5.3256
+[2025-09-05 15:20:27] [Rank 0] Group 12 Loss: 5.3814
+[2025-09-05 15:20:27] [Rank 0] Group 13 Loss: 5.4651
+[2025-09-05 15:20:27] [Rank 0] Group 14 Loss: 5.4232
+[2025-09-05 15:20:27] [Rank 0] Group 15 Loss: 5.5038
+[2025-09-05 15:20:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:20:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:20:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:20:27] [Rank 0] Group 3 FTA: 1.0000
FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 9 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 10 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 11 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 12 FTA: 1.0000 +[2025-09-05 15:20:27] [Rank 0] Group 13 FTA: 0.9100 +[2025-09-05 15:20:27] [Rank 0] Group 13 FTA: 0.9100 +[2025-09-05 15:20:27] [Rank 0] Group 14 FTA: 0.5000 +[2025-09-05 15:20:27] [Rank 0] Group 14 FTA: 0.5000 +[2025-09-05 15:20:27] [Rank 0] Group 15 FTA: 0.2300 +[2025-09-05 15:20:27] [Rank 0] Group 15 FTA: 0.2300 +[2025-09-05 15:20:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png +[2025-09-05 15:20:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_loss_curves.png +[2025-09-05 15:20:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png +[2025-09-05 15:20:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/per_class_acc_curves.png +[2025-09-05 15:20:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png +[2025-09-05 15:20:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_loss_curve.png +[2025-09-05 15:20:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png +[2025-09-05 15:20:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_42/total_acc_curve.png +[2025-09-05 15:20:30] [Rank 0] step:10001/10000 train_time:372553ms step_avg:37.25ms +[2025-09-05 15:20:30] [Rank 0] step:10001/10000 train_time:372553ms step_avg:37.25ms +[2025-09-05 15:20:30] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 15:20:30 2025 --- +[2025-09-05 15:20:30] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 15:20:30 2025 --- +[2025-09-05 15:20:30] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB +[2025-09-05 15:20:30] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/config.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..17c48a8ea3d1bd6a3088cfe23e6adb66bb159b0f --- /dev/null +++ 
b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 5, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.01, + "base_dir": "logs_qa_adam_gated/lr_search_long", + "sgd_lr": 0.01, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "94725803-484f-4779-aae2-2100327082da", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/fixed_eval_indices.json b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 
121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 
1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 
850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 
756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..1e988c4f3316952d892f7818253a5c68acd357ae --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bfa4f37ac50e81c459d65c645f34cadea13aeec7a8a8f820dc186148073cbf1 +size 396896 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..8e2cc8e4f1d40fd9a5853bfad0dffa535ae06ccd --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:529a26ee4d91728a1c3a5d9b87e5ff7667c6f5f23dd5ee2285f6c040ebb94e70 +size 503427 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..5148c84ecb018e7a62f6a0c263b82b0931b1bac5 --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e34ae87fb9add4ac85d30fee9b440e4ce2396233ad2c7bd8c24fb4158275d07a +size 97590 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..d1ba370afd1751451becad9e5a2696e24542dedb --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ac314bfe59c435b241e7cbdbe1fdcea6cf1aa808430fda1caf4114748e7f359 +size 120420 diff --git a/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/training_log_94725803-484f-4779-aae2-2100327082da.txt b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/training_log_94725803-484f-4779-aae2-2100327082da.txt new file mode 100644 index 0000000000000000000000000000000000000000..fb4db1ea3373752f8680532b16fafd763ed0f06e --- /dev/null +++ b/logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/training_log_94725803-484f-4779-aae2-2100327082da.txt @@ -0,0 +1,5614 @@ +[2025-09-05 17:43:28] [Rank 0] PRINT: 
--- Script Start: Fri Sep 5 17:43:28 2025 ---
+[2025-09-05 17:43:28] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.01, base_dir='logs_qa_adam_gated/lr_search_long', sgd_lr=0.01, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 17:43:28] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 17:43:28] [Rank 0] PRINT: Using fixed seed: 43
+[2025-09-05 17:43:28] [Rank 0] PRINT: Run directory: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43
+[2025-09-05 17:43:28] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] ==
20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
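For reference, the loader above fixes the shard layout: a 256-slot int32 header (magic 20240520, version 1, token count) followed by the tokens as uint16. A minimal sketch of a compatible shard writer, assuming numpy; write_token_shard is a hypothetical helper, not part of the script:

import numpy as np

def write_token_shard(path, tokens):
    # 256 int32 slots = the 1024-byte header that _load_data_shard seeks past
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520          # magic number checked by the loader
    header[1] = 1                 # version checked by the loader
    header[2] = len(tokens)       # token count; the loader reads 2 * count bytes
    with open(path, "wb") as f:
        f.write(header.tobytes())
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())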
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
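The "Hyperparameters: Hyperparameters()" log line above prints no fields because the attributes in the Hyperparameters dataclass carry no type annotations, so @dataclass registers zero fields and the generated repr is empty; the config.json dump works around this by reading args.__class__.__dict__ directly. A minimal standalone sketch of the pitfall, with hypothetical class names:

from dataclasses import dataclass, fields

@dataclass
class Annotated:
    val_tokens: int = 491520   # annotated -> a real dataclass field

@dataclass
class Unannotated:
    val_tokens = 491520        # plain class attribute -> invisible to @dataclass

print(Annotated())               # Annotated(val_tokens=491520)
print(Unannotated())             # Unannotated()
print(len(fields(Unannotated)))  # 0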
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Write each message to the log file exactly once; a second unconditional
+        # write here would duplicate every log line.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ...
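Note that run_folder_name embeds exp_args.adam_lr rather than muon_lr (the commented-out variants show the alternatives), which is why the mode-5 run directories in these logs are named by the Adam learning rate. A quick standalone check of the f-string with this run's CLI values:

optimizer_mode, parameterization, adam_lr, seed = 5, "gated", 0.01, 43
run_folder_name = f"mode_{optimizer_mode}_param_{parameterization}_lr_{adam_lr}_seed_{seed}"
assert run_folder_name == "mode_5_param_gated_lr_0.01_seed_43"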
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
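The power-law layout built above gives m + 1 groups: group 0 holds a single class with 2**m samples, and each group g >= 1 holds 2**(g-1) classes with 2**(m-g) samples per class. A standalone sketch for the m=15 runs in these logs; with per_group_k=100 fixed-eval indices per group it reproduces the 1600-sample eval set reported in the log:

m, per_group_k = 15, 100
layout = {0: (1, 2 ** m)}                     # group 0: one class, 2^15 samples
for g in range(1, m + 1):
    layout[g] = (2 ** (g - 1), 2 ** (m - g))  # (classes in group, samples per class)
print(len(layout))                  # 16 groups (0..15), matching Group 0..15 in the log
print(len(layout) * per_group_k)    # 1600, matching "Fixed-eval set loaded with 1600 samples."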
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
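First-token accuracy (FTA) in the loop above scores a sample as correct when the argmax logit at the last prompt position equals the first token of the gold answer. A minimal sketch of the parsing step on a hypothetical sample; the tokenizer/logits steps are left as comments since they need a live model:

import re

text = "Where was Marie Curie born? Answer: Warsaw"   # hypothetical QA line
match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', text, re.IGNORECASE)
prompt, answer = (part.strip() for part in match.groups())
print(prompt)   # Where was Marie Curie born?
print(answer)   # Warsaw
# expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
# predicted = torch.argmax(logits.squeeze(0)[prompt_len - 1, :]).item()
# hit = (predicted == expected_token)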
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
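Because the fixed eval set holds the same number of samples (per_group_k=100) in every group, the sample-weighted and group-averaged totals coincide, which is why the log reports identical weighted and unweighted FTA. A standalone numeric check against the values logged above:

per_group_fta = [1.0] * 13 + [0.91, 0.50, 0.23]              # Groups 0-15 from the log
unweighted = sum(per_group_fta) / len(per_group_fta)          # simple average over groups
weighted = sum(f * 100 for f in per_group_fta) / (16 * 100)   # weighted by 100 samples/group
print(round(unweighted, 4), round(weighted, 4))               # 0.915 0.915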
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O.
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # note: weight_decay=0.01 could be added to Adam here
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in the gated attention class used by this parameterization + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >= 2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr # --- FIX: was missing in the gated branch; Muon creation below needs it --- + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # weight_decay (e.g. 0.01) could be added to Adam here + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: constant for the first (1 - cooldown_frac) of training, then linear cooldown to 0.1x +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1
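+# Editor note, illustrative check of get_lr above (not part of the original run): +# with num_iterations=10000 and cooldown_frac=0.8, the multiplier stays at 1.0 +# for steps 0-1999, then decays linearly to 0.1 at step 10000; e.g. step 6000 +# gives x=0.6, w=0.5, so 0.5*1.0 + 0.5*0.1 = 0.55.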
+ + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# compile the eager 'model'; everything below uses the compiled handle + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filter for building the fixed eval set: the text must parse as "<question>?" + "Answer: <answer>" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed
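+ # Editor note on the helper above: build_fixed_eval_indices freezes the eval + # set once per run directory; it buckets parseable QA lines by group id and + # samples up to per_group_k line indices per group with a fixed-seed RNG, + # producing e.g. {"0": [14, 203, ...], "1": [7, 98, ...]}, so repeated + # evaluations (and re-runs on the same jsonl/seed) score the same samples.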
+ + # QA_JSONL_PATH and M_FOR_POWERLAW are defined above from the CLI args + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 17:43:28] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # cycling through the shards enables multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets
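+# Editor note on the shard layout assumed by _load_data_shard above: each .bin +# file starts with a 256-int32 header (header[0] = magic 20240520, header[1] = +# version 1, header[2] = token count), followed by the tokens themselves as +# uint16 values starting at byte offset 256*4.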
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") # --- FIX: removed a duplicated write block that logged every line twice ---
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
+ + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1
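+ # Editor note on the metric above: FTA is first-token accuracy - the argmax + # logit at the last prompt position is compared against the first token of + # " <answer>". For the totals below, the two averages can diverge; e.g. + # group A at 90/100 and group B at 1/10 give weighted 91/110 ~ 0.83 but + # unweighted (0.9 + 0.1) / 2 = 0.5 (illustrative numbers).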
+ + # 4. Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close()
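+# Editor note on the input plot_curves expects: per-class histories are nested +# dicts {"<group_id>": {"<step>": value, ...}, ...} while total histories are +# flat {"<step>": value, ...}; the function distinguishes the two by checking +# whether the first value is itself a dict (both shapes are built in the +# training loop below).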
+ + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. + """ + print0("\n--- Starting Per-Class Loss Evaluation ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits = 
result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >= 2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
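+ # Mode summary (editor note, as implemented by the dispatch below; modes 11-12 + # are unused and fall through to the ValueError): 0 Muon(attn+MLP); + # 1 Muon(QK)/Adam(VO,MLP); 2 Muon(VO)/Adam(QK,MLP); 3 Muon(attn)/Adam(MLP); + # 4 Muon(MLP)/Adam(attn); 5 all-Adam; 6 Muon(W_2)/Adam(attn,W_1); + # 7 Muon(VO,MLP)/Adam(QK); 8 Muon(VO,W_2)/Adam(QK,W_1); 9 SGD+momentum on + # everything; 10 Muon(W_O,MLP)/Adam(W_V,QK); 13 Muon(W_O,W_2)/Adam(QK,W_V,W_1); + # 14 Muon(W_O)/Adam(rest); 15 Muon(W_V)/Adam(rest); 16 Muon(QKV)/Adam(W_O,MLP).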
+ + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
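+# The "gated" branch below repeats the collection logic above almost verbatim; the only
+# difference is the gated MLP's second input projection (c_up), which joins c_fc in the
+# W_1 group. Factoring the two branches into a shared helper would be a natural cleanup.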
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
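+    # The mode dispatch below mirrors the "qkvo" branch one-for-one; only the contents
+    # of mlp_w1_group and all_mlp_matrices differ, since they now include the c_up weights.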
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
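+    # A minimal sketch of how this elif chain could be table-driven instead (not used in
+    # this run, shown only as a possible refactor; the names match the groups above):
+    #
+    #     MODE_TABLE = {
+    #         0: (all_attn_matrices + all_mlp_matrices, []),
+    #         5: ([], all_attn_matrices + all_mlp_matrices),
+    #         14: (attn_o_params, attn_qk_group + attn_v_params + all_mlp_matrices),
+    #         # ... one (muon_targets, adam_targets) pair per mode ...
+    #     }
+    #     muon_params_target_list, adam_matrix_target_list = MODE_TABLE[current_optimizer_mode]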
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
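+# The "whole" branch below keeps the stock modded-nanogpt optimizer split (Adam on
+# head/embed/scalars with fixed per-group LRs, Muon on all hidden matrices) and ignores
+# the optimizer_mode / adam_lr / muon_lr command-line settings.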
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert; it would fail on the last step when step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x = 1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
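+# Worked example of the schedule above, assuming num_iterations=10000 (as in the run
+# logged below) and cooldown_frac=0.8 (illustrative value):
+#   step  1000 -> x=0.10 < 0.20: multiplier 1.0 (stable phase)
+#   step  6000 -> x=0.60, w=(1-0.6)/0.8=0.5: multiplier 0.5*1.0 + 0.5*0.1 = 0.55
+#   step 10000 -> x=1.00, w=0: multiplier 0.1 (end of the linear cooldown)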
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
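+# The warmup above exists only to trigger torch.compile kernel compilation and to
+# allocate optimizer state on dummy data; snapshotting initial_state beforehand and
+# restoring it after every warmup step ensures the 10 warmup steps leave no trace
+# in the real training run.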
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    # PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building fixed eval set; ensure parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    # QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    # M_FOR_POWERLAW = 15
+    # NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
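+        # With the settings in the log below (val_tokens=491520, val_batch_size=65536),
+        # val_num_steps = 491520 // 65536 = 7, so 32768 tokens are skipped each round;
+        # that is exactly what the divisibility warning above flags.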
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                # num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
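+        # Checkpoints below bundle the step, the logged source code, and the full model
+        # and optimizer state, so a run can be resumed or audited from any saved step.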
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 17:43:28] [Rank 0] PRINT: Constructing model...
+[2025-09-05 17:43:30] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 17:43:30] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 17:43:30] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 17:43:34] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 17:43:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 17:43:34] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 17:43:34] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 17:43:34] [Rank 0] PRINT: Model returns:
+[2025-09-05 17:43:34] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 17:43:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-09-05 17:43:34] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.01).
+[2025-09-05 17:43:34] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-09-05 17:43:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 17:43:34] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 17:43:39] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 17:43:39] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 17:44:19] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 17:44:19] [Rank 0] PRINT: Starting training...
+[2025-09-05 17:44:26] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/fixed_eval_indices.json
+[2025-09-05 17:44:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:44:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 17:45:03] [Rank 0] step:21/10000 train_time:32839ms step_avg:1563.77ms
+[2025-09-05 17:45:04] [Rank 0] step:41/10000 train_time:33487ms step_avg:816.76ms
+[2025-09-05 17:45:04] [Rank 0] step:61/10000 train_time:34134ms step_avg:559.58ms
+[2025-09-05 17:45:05] [Rank 0] step:81/10000 train_time:34781ms step_avg:429.40ms
+[2025-09-05 17:45:06] [Rank 0] step:101/10000 train_time:35582ms step_avg:352.30ms
+[2025-09-05 17:45:06] [Rank 0] step:121/10000 train_time:36231ms step_avg:299.43ms
+[2025-09-05 17:45:07] [Rank 0] step:141/10000 train_time:36879ms step_avg:261.56ms
+[2025-09-05 17:45:08] [Rank 0] step:161/10000 train_time:37528ms step_avg:233.09ms
+[2025-09-05 17:45:09] [Rank 0] step:181/10000 train_time:38342ms step_avg:211.84ms
+[2025-09-05 17:45:09] [Rank 0] step:201/10000 train_time:38990ms step_avg:193.98ms
+[2025-09-05 17:45:10] [Rank 0] step:221/10000 train_time:39637ms step_avg:179.35ms
+[2025-09-05 17:45:11] [Rank 0] step:241/10000 train_time:40284ms step_avg:167.15ms
+[2025-09-05 17:45:11] [Rank 0] step:261/10000 train_time:40933ms step_avg:156.83ms
+[2025-09-05 17:45:12] [Rank 0] step:281/10000 train_time:41580ms step_avg:147.97ms
+[2025-09-05 17:45:12] [Rank 0] step:301/10000 train_time:42227ms step_avg:140.29ms
+[2025-09-05 17:45:13] [Rank 0] step:321/10000 train_time:42874ms step_avg:133.56ms
+[2025-09-05 17:45:14] [Rank 0] step:341/10000 train_time:43521ms step_avg:127.63ms
+[2025-09-05 17:45:14] [Rank 0] step:361/10000 train_time:44169ms step_avg:122.35ms
+[2025-09-05 17:45:15] [Rank 0] step:381/10000 train_time:44819ms step_avg:117.64ms
+[2025-09-05 17:45:16] [Rank 0] step:401/10000 train_time:45466ms step_avg:113.38ms
+[2025-09-05 17:45:16] [Rank 0] step:421/10000 train_time:46113ms step_avg:109.53ms
+[2025-09-05 17:45:17] [Rank 0] step:441/10000 train_time:46760ms step_avg:106.03ms
+[2025-09-05 17:45:18] [Rank 0] step:461/10000 train_time:47407ms step_avg:102.84ms
+[2025-09-05 17:45:18] [Rank 0] step:481/10000 train_time:48055ms step_avg:99.91ms
+[2025-09-05 17:45:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:45:19] [Rank 0] PRINT: step:500/10000 train_loss:2.8376 val_loss:1.2005 train_time:48933ms step_avg:97.87ms
+[2025-09-05 17:45:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:45:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:46:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:46:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:46:43] [Rank 0] Total Loss: 4.8357
+[2025-09-05 17:46:43] [Rank 0] Total FTA (Unweighted): 0.4056
+[2025-09-05 17:46:43] [Rank 0] Total FTA (Weighted): 0.4056
+[2025-09-05 17:46:43] [Rank 0] Group 0 Loss: 4.2373
+[2025-09-05 17:46:43] [Rank 0] Group 1 Loss: 3.9148
+[2025-09-05 17:46:43] [Rank 0] Group 2 Loss: 3.9764
+[2025-09-05 17:46:43] [Rank 0] Group 3 Loss: 4.3161
+[2025-09-05 17:46:43] [Rank 0] Group 4 Loss: 4.2864
+[2025-09-05 17:46:43] [Rank 0] Group 5 Loss: 4.4929
+[2025-09-05 17:46:43] [Rank 0] Group 6 Loss: 4.5648
+[2025-09-05 17:46:43] [Rank 0] Group 7 Loss: 4.6961
+[2025-09-05 17:46:43] [Rank 0] Group 8 Loss: 4.9860
+[2025-09-05 17:46:43] [Rank 0] Group 9 Loss: 5.1927
+[2025-09-05 17:46:43] [Rank 0] Group 10 Loss: 5.3859
+[2025-09-05 17:46:43] [Rank 0] Group 11 Loss: 5.4155
+[2025-09-05 17:46:43] [Rank 0] Group 12 Loss: 5.4730
+[2025-09-05 17:46:43] [Rank 0] Group 13 Loss: 5.5106
+[2025-09-05 17:46:43] [Rank 0] Group 14 Loss: 5.5121
+[2025-09-05 17:46:43] [Rank 0] Group 15 Loss: 5.4110
+[2025-09-05 17:46:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:46:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:46:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:46:43] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:46:43] [Rank 0] Group 4 FTA: 0.9600
+[2025-09-05 17:46:43] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 17:46:43] [Rank 0] Group 6 FTA: 0.2800
+[2025-09-05 17:46:43] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 17:46:43] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-05 17:46:43] [Rank 0] Group 9 FTA: 0.0300
+[2025-09-05 17:46:43] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 17:46:43] [Rank 0] Group 11 FTA: 0.0700
+[2025-09-05 17:46:43] [Rank 0] Group 12 FTA: 0.0700
+[2025-09-05 17:46:43] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 17:46:43] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 17:46:43] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 17:46:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 17:46:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 17:46:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 17:46:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 17:46:44] [Rank 0] step:501/10000 train_time:48942ms step_avg:97.69ms
+[2025-09-05 17:46:45] [Rank 0] step:521/10000 train_time:49388ms step_avg:94.79ms
+[2025-09-05 17:46:45] [Rank 0] step:541/10000 train_time:50034ms step_avg:92.48ms
+[2025-09-05 17:46:46] [Rank 0] step:561/10000 train_time:50681ms step_avg:90.34ms
+[2025-09-05 17:46:47] [Rank 0] step:581/10000 train_time:51327ms step_avg:88.34ms
+[2025-09-05 17:46:47] [Rank 0] step:601/10000 train_time:51974ms step_avg:86.48ms
+[2025-09-05 17:46:48] [Rank 0] step:621/10000 train_time:52622ms step_avg:84.74ms
+[2025-09-05 17:46:49] [Rank 0] step:641/10000 train_time:53268ms step_avg:83.10ms
+[2025-09-05 17:46:49] [Rank 0] step:661/10000 train_time:53916ms step_avg:81.57ms
+[2025-09-05 17:46:50] [Rank 0] step:681/10000 train_time:54563ms step_avg:80.12ms
+[2025-09-05 17:46:51] [Rank 0] step:701/10000 train_time:55211ms step_avg:78.76ms
+[2025-09-05 17:46:51] [Rank 0] step:721/10000 train_time:55857ms step_avg:77.47ms
+[2025-09-05 17:46:52] [Rank 0] step:741/10000 train_time:56507ms step_avg:76.26ms
+[2025-09-05 17:46:53] [Rank 0] step:761/10000 train_time:57155ms step_avg:75.11ms
+[2025-09-05 17:46:53] [Rank 0] step:781/10000 train_time:57808ms step_avg:74.02ms
+[2025-09-05 17:46:54] [Rank 0] step:801/10000 train_time:58460ms step_avg:72.98ms
+[2025-09-05 17:46:55] [Rank 0] step:821/10000 train_time:59113ms step_avg:72.00ms
+[2025-09-05 17:46:56] [Rank 0] step:841/10000 train_time:60242ms step_avg:71.63ms
+[2025-09-05 17:46:56] [Rank 0] step:861/10000 train_time:60894ms step_avg:70.72ms
+[2025-09-05 17:46:57] [Rank 0] step:881/10000 train_time:61546ms step_avg:69.86ms
+[2025-09-05 17:46:58] [Rank 0] step:901/10000 train_time:62198ms step_avg:69.03ms
+[2025-09-05 17:46:58] [Rank 0] step:921/10000 train_time:62850ms step_avg:68.24ms
+[2025-09-05 17:46:59] [Rank 0] step:941/10000 train_time:63502ms step_avg:67.48ms
+[2025-09-05 17:47:00] [Rank 0] step:961/10000 train_time:64154ms step_avg:66.76ms
+[2025-09-05 17:47:00] [Rank 0] step:981/10000 train_time:64807ms step_avg:66.06ms
+[2025-09-05 17:47:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:47:01] [Rank 0] PRINT: step:1000/10000 train_loss:1.0626 val_loss:0.9448 train_time:65691ms step_avg:65.69ms
+[2025-09-05 17:47:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:47:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:48:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:48:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:48:24] [Rank 0] Total Loss: 5.3393
+[2025-09-05 17:48:24] [Rank 0] Total FTA (Unweighted): 0.5794
+[2025-09-05 17:48:24] [Rank 0] Total FTA (Weighted): 0.5794
+[2025-09-05 17:48:24] [Rank 0] Group 0 Loss: 4.9999
+[2025-09-05 17:48:24] [Rank 0] Group 1 Loss: 4.5720
+[2025-09-05 17:48:24] [Rank 0] Group 2 Loss: 4.6114
+[2025-09-05 17:48:24] [Rank 0] Group 3 Loss: 5.0035
+[2025-09-05 17:48:24] [Rank 0] Group 4 Loss: 4.9267
+[2025-09-05 17:48:24] [Rank 0] Group 5 Loss: 5.0326
+[2025-09-05 17:48:24] [Rank 0] Group 6 Loss: 5.0324
+[2025-09-05 17:48:24] [Rank 0] Group 7 Loss: 5.0726
+[2025-09-05 17:48:24] [Rank 0] Group 8 Loss: 5.3111
+[2025-09-05 17:48:24] [Rank 0] Group 9 Loss: 5.4102
+[2025-09-05 17:48:24] [Rank 0] Group 10 Loss: 5.6559
+[2025-09-05 17:48:24] [Rank 0] Group 11 Loss: 5.7915
+[2025-09-05 17:48:24] [Rank 0] Group 12 Loss: 5.9174
+[2025-09-05 17:48:24] [Rank 0] Group 13 Loss: 6.0558
+[2025-09-05 17:48:24] [Rank 0] Group 14 Loss: 6.0465
+[2025-09-05 17:48:24] [Rank 0] Group 15 Loss: 5.9895
+[2025-09-05 17:48:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:48:24] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:48:24] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:48:24] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:48:24] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:48:24] [Rank 0] Group 5 FTA: 0.9900
+[2025-09-05 17:48:24] [Rank 0] Group 6 FTA: 0.9100
+[2025-09-05 17:48:24] [Rank 0] Group 7 FTA: 0.9400
+[2025-09-05 17:48:24] [Rank 0] Group 8 FTA: 0.5300
+[2025-09-05 17:48:24] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 17:48:24] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 17:48:24] [Rank 0] Group 11 FTA: 0.0800
+[2025-09-05 17:48:24] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 17:48:24] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 17:48:24] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 17:48:24] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:48:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 17:48:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 17:48:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 17:48:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 17:48:26] [Rank 0] step:1001/10000 train_time:65700ms step_avg:65.63ms
+[2025-09-05 17:48:27] [Rank 0] step:1021/10000 train_time:66133ms step_avg:64.77ms
+[2025-09-05 17:48:27] [Rank 0] step:1041/10000 train_time:66786ms step_avg:64.16ms
+[2025-09-05 17:48:28] [Rank 0] step:1061/10000 train_time:67438ms step_avg:63.56ms
+[2025-09-05 17:48:29] [Rank 0] step:1081/10000 train_time:68092ms step_avg:62.99ms
+[2025-09-05 17:48:29] [Rank 0] step:1101/10000 train_time:68745ms step_avg:62.44ms
+[2025-09-05 17:48:30] [Rank 0] step:1121/10000 train_time:69397ms step_avg:61.91ms
+[2025-09-05 17:48:30] [Rank 0] step:1141/10000 train_time:70050ms step_avg:61.39ms
+[2025-09-05 17:48:31] [Rank 0] step:1161/10000 train_time:70703ms step_avg:60.90ms
+[2025-09-05 17:48:32] [Rank 0] step:1181/10000 train_time:71356ms step_avg:60.42ms
+[2025-09-05 17:48:32] [Rank 0] step:1201/10000 train_time:72008ms step_avg:59.96ms
+[2025-09-05 17:48:33] [Rank 0] step:1221/10000 train_time:72661ms step_avg:59.51ms
+[2025-09-05 17:48:34] [Rank 0] step:1241/10000 train_time:73314ms step_avg:59.08ms
+[2025-09-05 17:48:34] [Rank 0] step:1261/10000 train_time:73966ms step_avg:58.66ms
+[2025-09-05 17:48:35] [Rank 0] step:1281/10000 train_time:74620ms step_avg:58.25ms
+[2025-09-05 17:48:36] [Rank 0] step:1301/10000 train_time:75274ms step_avg:57.86ms
+[2025-09-05 17:48:36] [Rank 0] step:1321/10000 train_time:75927ms step_avg:57.48ms
+[2025-09-05 17:48:37] [Rank 0] step:1341/10000 train_time:76580ms step_avg:57.11ms
+[2025-09-05 17:48:38] [Rank 0] step:1361/10000 train_time:77233ms step_avg:56.75ms
+[2025-09-05 17:48:38] [Rank 0] step:1381/10000 train_time:77887ms step_avg:56.40ms
+[2025-09-05 17:48:39] [Rank 0] step:1401/10000 train_time:78539ms step_avg:56.06ms
+[2025-09-05 17:48:40] [Rank 0] step:1421/10000 train_time:79193ms step_avg:55.73ms
+[2025-09-05 17:48:40] [Rank 0] step:1441/10000 train_time:79847ms step_avg:55.41ms
+[2025-09-05 17:48:41] [Rank 0] step:1461/10000 train_time:80500ms step_avg:55.10ms
+[2025-09-05 17:48:42] [Rank 0] step:1481/10000 train_time:81152ms step_avg:54.80ms
+[2025-09-05 17:48:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:48:43] [Rank 0] PRINT: step:1500/10000 train_loss:0.9043 val_loss:0.8566 train_time:82037ms step_avg:54.69ms
+[2025-09-05 17:48:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:48:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:50:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:50:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:50:05] [Rank 0] Total Loss: 5.3495
+[2025-09-05 17:50:05] [Rank 0] Total FTA (Unweighted): 0.6456
+[2025-09-05 17:50:05] [Rank 0] Total FTA (Weighted): 0.6456
+[2025-09-05 17:50:05] [Rank 0] Group 0 Loss: 5.1056
+[2025-09-05 17:50:05] [Rank 0] Group 1 Loss: 4.7902
+[2025-09-05 17:50:05] [Rank 0] Group 2 Loss: 4.7435
+[2025-09-05 17:50:05] [Rank 0] Group 3 Loss: 5.0271
+[2025-09-05 17:50:05] [Rank 0] Group 4 Loss: 4.9772
+[2025-09-05 17:50:05] [Rank 0] Group 5 Loss: 5.0489
+[2025-09-05 17:50:05] [Rank 0] Group 6 Loss: 5.0296
+[2025-09-05 17:50:05] [Rank 0] Group 7 Loss: 5.1133
+[2025-09-05 17:50:05] [Rank 0] Group 8 Loss: 5.2861
+[2025-09-05 17:50:05] [Rank 0] Group 9 Loss: 5.3264
+[2025-09-05 17:50:05] [Rank 0] Group 10 Loss: 5.5937
+[2025-09-05 17:50:05] [Rank 0] Group 11 Loss: 5.6630
+[2025-09-05 17:50:05] [Rank 0] Group 12 Loss: 5.7454
+[2025-09-05 17:50:05] [Rank 0] Group 13 Loss: 5.9852
+[2025-09-05 17:50:05] [Rank 0] Group 14 Loss: 6.0654
+[2025-09-05 17:50:05] [Rank 0] Group 15 Loss: 6.0915
+[2025-09-05 17:50:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:50:05] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:50:06] [Rank 0] Group 5 FTA: 0.9700 +[2025-09-05 17:50:06] [Rank 0] Group 5 FTA: 0.9700 +[2025-09-05 17:50:06] [Rank 0] Group 6 FTA: 0.9600 +[2025-09-05 17:50:06] [Rank 0] Group 6 FTA: 0.9600 +[2025-09-05 17:50:06] [Rank 0] Group 7 FTA: 0.9700 +[2025-09-05 17:50:06] [Rank 0] Group 7 FTA: 0.9700 +[2025-09-05 17:50:06] [Rank 0] Group 8 FTA: 0.9500 +[2025-09-05 17:50:06] [Rank 0] Group 8 FTA: 0.9500 +[2025-09-05 17:50:06] [Rank 0] Group 9 FTA: 0.6300 +[2025-09-05 17:50:06] [Rank 0] Group 9 FTA: 0.6300 +[2025-09-05 17:50:06] [Rank 0] Group 10 FTA: 0.3500 +[2025-09-05 17:50:06] [Rank 0] Group 10 FTA: 0.3500 +[2025-09-05 17:50:06] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-05 17:50:06] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-05 17:50:06] [Rank 0] Group 12 FTA: 0.0700 +[2025-09-05 17:50:06] [Rank 0] Group 12 FTA: 0.0700 +[2025-09-05 17:50:06] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 17:50:06] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 17:50:06] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 17:50:06] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 17:50:06] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 17:50:06] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 17:50:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png +[2025-09-05 17:50:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png +[2025-09-05 17:50:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png +[2025-09-05 17:50:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png +[2025-09-05 17:50:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png +[2025-09-05 17:50:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png +[2025-09-05 17:50:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png +[2025-09-05 17:50:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png +[2025-09-05 17:50:07] [Rank 0] step:1501/10000 train_time:82046ms step_avg:54.66ms +[2025-09-05 17:50:07] [Rank 0] step:1501/10000 train_time:82046ms step_avg:54.66ms +[2025-09-05 17:50:08] [Rank 0] step:1521/10000 train_time:82489ms step_avg:54.23ms +[2025-09-05 17:50:08] [Rank 0] step:1521/10000 train_time:82489ms step_avg:54.23ms +[2025-09-05 17:50:08] [Rank 0] step:1541/10000 train_time:83142ms step_avg:53.95ms +[2025-09-05 17:50:08] [Rank 0] step:1541/10000 train_time:83142ms step_avg:53.95ms +[2025-09-05 17:50:09] [Rank 0] 
step:1561/10000 train_time:83795ms step_avg:53.68ms +[2025-09-05 17:50:09] [Rank 0] step:1561/10000 train_time:83795ms step_avg:53.68ms +[2025-09-05 17:50:10] [Rank 0] step:1581/10000 train_time:84447ms step_avg:53.41ms +[2025-09-05 17:50:10] [Rank 0] step:1581/10000 train_time:84447ms step_avg:53.41ms +[2025-09-05 17:50:10] [Rank 0] step:1601/10000 train_time:85099ms step_avg:53.15ms +[2025-09-05 17:50:10] [Rank 0] step:1601/10000 train_time:85099ms step_avg:53.15ms +[2025-09-05 17:50:11] [Rank 0] step:1621/10000 train_time:85752ms step_avg:52.90ms +[2025-09-05 17:50:11] [Rank 0] step:1621/10000 train_time:85752ms step_avg:52.90ms +[2025-09-05 17:50:12] [Rank 0] step:1641/10000 train_time:86585ms step_avg:52.76ms +[2025-09-05 17:50:12] [Rank 0] step:1641/10000 train_time:86585ms step_avg:52.76ms +[2025-09-05 17:50:12] [Rank 0] step:1661/10000 train_time:87237ms step_avg:52.52ms +[2025-09-05 17:50:12] [Rank 0] step:1661/10000 train_time:87237ms step_avg:52.52ms +[2025-09-05 17:50:13] [Rank 0] step:1681/10000 train_time:87890ms step_avg:52.28ms +[2025-09-05 17:50:13] [Rank 0] step:1681/10000 train_time:87890ms step_avg:52.28ms +[2025-09-05 17:50:14] [Rank 0] step:1701/10000 train_time:88542ms step_avg:52.05ms +[2025-09-05 17:50:14] [Rank 0] step:1701/10000 train_time:88542ms step_avg:52.05ms +[2025-09-05 17:50:14] [Rank 0] step:1721/10000 train_time:89195ms step_avg:51.83ms +[2025-09-05 17:50:14] [Rank 0] step:1721/10000 train_time:89195ms step_avg:51.83ms +[2025-09-05 17:50:15] [Rank 0] step:1741/10000 train_time:89848ms step_avg:51.61ms +[2025-09-05 17:50:15] [Rank 0] step:1741/10000 train_time:89848ms step_avg:51.61ms +[2025-09-05 17:50:16] [Rank 0] step:1761/10000 train_time:90501ms step_avg:51.39ms +[2025-09-05 17:50:16] [Rank 0] step:1761/10000 train_time:90501ms step_avg:51.39ms +[2025-09-05 17:50:16] [Rank 0] step:1781/10000 train_time:91152ms step_avg:51.18ms +[2025-09-05 17:50:16] [Rank 0] step:1781/10000 train_time:91152ms step_avg:51.18ms +[2025-09-05 17:50:17] [Rank 0] step:1801/10000 train_time:91805ms step_avg:50.97ms +[2025-09-05 17:50:17] [Rank 0] step:1801/10000 train_time:91805ms step_avg:50.97ms +[2025-09-05 17:50:18] [Rank 0] step:1821/10000 train_time:92457ms step_avg:50.77ms +[2025-09-05 17:50:18] [Rank 0] step:1821/10000 train_time:92457ms step_avg:50.77ms +[2025-09-05 17:50:18] [Rank 0] step:1841/10000 train_time:93110ms step_avg:50.58ms +[2025-09-05 17:50:18] [Rank 0] step:1841/10000 train_time:93110ms step_avg:50.58ms +[2025-09-05 17:50:19] [Rank 0] step:1861/10000 train_time:93761ms step_avg:50.38ms +[2025-09-05 17:50:19] [Rank 0] step:1861/10000 train_time:93761ms step_avg:50.38ms +[2025-09-05 17:50:20] [Rank 0] step:1881/10000 train_time:94414ms step_avg:50.19ms +[2025-09-05 17:50:20] [Rank 0] step:1881/10000 train_time:94414ms step_avg:50.19ms +[2025-09-05 17:50:20] [Rank 0] step:1901/10000 train_time:95070ms step_avg:50.01ms +[2025-09-05 17:50:20] [Rank 0] step:1901/10000 train_time:95070ms step_avg:50.01ms +[2025-09-05 17:50:21] [Rank 0] step:1921/10000 train_time:95720ms step_avg:49.83ms +[2025-09-05 17:50:21] [Rank 0] step:1921/10000 train_time:95720ms step_avg:49.83ms +[2025-09-05 17:50:22] [Rank 0] step:1941/10000 train_time:96580ms step_avg:49.76ms +[2025-09-05 17:50:22] [Rank 0] step:1941/10000 train_time:96580ms step_avg:49.76ms +[2025-09-05 17:50:22] [Rank 0] step:1961/10000 train_time:97235ms step_avg:49.58ms +[2025-09-05 17:50:22] [Rank 0] step:1961/10000 train_time:97235ms step_avg:49.58ms +[2025-09-05 17:50:23] [Rank 0] step:1981/10000 
train_time:97885ms step_avg:49.41ms +[2025-09-05 17:50:23] [Rank 0] step:1981/10000 train_time:97885ms step_avg:49.41ms +[2025-09-05 17:50:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:50:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:50:24] [Rank 0] PRINT: step:2000/10000 train_loss:0.8393 val_loss:0.8054 train_time:98769ms step_avg:49.38ms +[2025-09-05 17:50:24] [Rank 0] PRINT: step:2000/10000 train_loss:0.8393 val_loss:0.8054 train_time:98769ms step_avg:49.38ms +[2025-09-05 17:50:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:50:24] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:50:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:50:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:51:47] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:51:47] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:51:47] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:51:47] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:51:47] [Rank 0] Total Loss: 5.2053 +[2025-09-05 17:51:47] [Rank 0] Total Loss: 5.2053 +[2025-09-05 17:51:47] [Rank 0] Total FTA (Unweighted): 0.6919 +[2025-09-05 17:51:47] [Rank 0] Total FTA (Unweighted): 0.6919 +[2025-09-05 17:51:47] [Rank 0] Total FTA (Weighted): 0.6919 +[2025-09-05 17:51:47] [Rank 0] Total FTA (Weighted): 0.6919 +[2025-09-05 17:51:47] [Rank 0] Group 0 Loss: 5.0397 +[2025-09-05 17:51:47] [Rank 0] Group 0 Loss: 5.0397 +[2025-09-05 17:51:47] [Rank 0] Group 1 Loss: 4.5726 +[2025-09-05 17:51:47] [Rank 0] Group 1 Loss: 4.5726 +[2025-09-05 17:51:47] [Rank 0] Group 2 Loss: 4.5217 +[2025-09-05 17:51:47] [Rank 0] Group 2 Loss: 4.5217 +[2025-09-05 17:51:47] [Rank 0] Group 3 Loss: 4.9967 +[2025-09-05 17:51:47] [Rank 0] Group 3 Loss: 4.9967 +[2025-09-05 17:51:47] [Rank 0] Group 4 Loss: 4.8728 +[2025-09-05 17:51:47] [Rank 0] Group 4 Loss: 4.8728 +[2025-09-05 17:51:47] [Rank 0] Group 5 Loss: 4.9443 +[2025-09-05 17:51:47] [Rank 0] Group 5 Loss: 4.9443 +[2025-09-05 17:51:47] [Rank 0] Group 6 Loss: 4.8917 +[2025-09-05 17:51:47] [Rank 0] Group 6 Loss: 4.8917 +[2025-09-05 17:51:47] [Rank 0] Group 7 Loss: 5.0148 +[2025-09-05 17:51:47] [Rank 0] Group 7 Loss: 5.0148 +[2025-09-05 17:51:47] [Rank 0] Group 8 Loss: 5.1627 +[2025-09-05 17:51:47] [Rank 0] Group 8 Loss: 5.1627 +[2025-09-05 17:51:47] [Rank 0] Group 9 Loss: 5.1551 +[2025-09-05 17:51:47] [Rank 0] Group 9 Loss: 5.1551 +[2025-09-05 17:51:48] [Rank 0] Group 10 Loss: 5.4075 +[2025-09-05 17:51:48] [Rank 0] Group 10 Loss: 5.4075 +[2025-09-05 17:51:48] [Rank 0] Group 11 Loss: 5.5281 +[2025-09-05 17:51:48] [Rank 0] Group 11 Loss: 5.5281 +[2025-09-05 17:51:48] [Rank 0] Group 12 Loss: 5.5585 +[2025-09-05 17:51:48] [Rank 0] Group 12 Loss: 5.5585 +[2025-09-05 17:51:48] [Rank 0] Group 13 Loss: 5.7470 +[2025-09-05 17:51:48] [Rank 0] Group 13 Loss: 5.7470 +[2025-09-05 17:51:48] [Rank 0] Group 14 Loss: 5.8758 +[2025-09-05 17:51:48] [Rank 0] Group 14 Loss: 5.8758 +[2025-09-05 17:51:48] [Rank 0] Group 15 Loss: 5.9950 +[2025-09-05 17:51:48] [Rank 0] Group 15 Loss: 5.9950 +[2025-09-05 17:51:48] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:51:48] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:51:48] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:51:48] [Rank 0] Group 1 FTA: 
1.0000 +[2025-09-05 17:51:48] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:51:48] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:51:48] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:51:48] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:51:48] [Rank 0] Group 4 FTA: 0.9800 +[2025-09-05 17:51:48] [Rank 0] Group 4 FTA: 0.9800 +[2025-09-05 17:51:48] [Rank 0] Group 5 FTA: 0.9400 +[2025-09-05 17:51:48] [Rank 0] Group 5 FTA: 0.9400 +[2025-09-05 17:51:48] [Rank 0] Group 6 FTA: 0.9900 +[2025-09-05 17:51:48] [Rank 0] Group 6 FTA: 0.9900 +[2025-09-05 17:51:48] [Rank 0] Group 7 FTA: 0.9700 +[2025-09-05 17:51:48] [Rank 0] Group 7 FTA: 0.9700 +[2025-09-05 17:51:48] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 17:51:48] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 17:51:48] [Rank 0] Group 9 FTA: 0.9000 +[2025-09-05 17:51:48] [Rank 0] Group 9 FTA: 0.9000 +[2025-09-05 17:51:48] [Rank 0] Group 10 FTA: 0.5900 +[2025-09-05 17:51:48] [Rank 0] Group 10 FTA: 0.5900 +[2025-09-05 17:51:48] [Rank 0] Group 11 FTA: 0.1600 +[2025-09-05 17:51:48] [Rank 0] Group 11 FTA: 0.1600 +[2025-09-05 17:51:48] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 17:51:48] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 17:51:48] [Rank 0] Group 13 FTA: 0.1800 +[2025-09-05 17:51:48] [Rank 0] Group 13 FTA: 0.1800 +[2025-09-05 17:51:48] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 17:51:48] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 17:51:48] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 17:51:48] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 17:51:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png +[2025-09-05 17:51:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png +[2025-09-05 17:51:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png +[2025-09-05 17:51:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png +[2025-09-05 17:51:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png +[2025-09-05 17:51:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png +[2025-09-05 17:51:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png +[2025-09-05 17:51:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png +[2025-09-05 17:51:49] [Rank 0] step:2001/10000 train_time:98778ms step_avg:49.36ms +[2025-09-05 17:51:49] [Rank 0] step:2001/10000 train_time:98778ms step_avg:49.36ms +[2025-09-05 17:51:50] [Rank 0] step:2021/10000 train_time:99231ms step_avg:49.10ms +[2025-09-05 17:51:50] [Rank 0] step:2021/10000 train_time:99231ms step_avg:49.10ms +[2025-09-05 17:51:50] [Rank 0] step:2041/10000 train_time:99884ms step_avg:48.94ms +[2025-09-05 17:51:50] [Rank 0] step:2041/10000 train_time:99884ms step_avg:48.94ms +[2025-09-05 17:51:51] [Rank 0] step:2061/10000 train_time:100537ms step_avg:48.78ms +[2025-09-05 17:51:51] [Rank 0] step:2061/10000 train_time:100537ms step_avg:48.78ms +[2025-09-05 
17:51:52] [Rank 0] step:2081/10000 train_time:101190ms step_avg:48.63ms +[2025-09-05 17:51:52] [Rank 0] step:2081/10000 train_time:101190ms step_avg:48.63ms +[2025-09-05 17:51:52] [Rank 0] step:2101/10000 train_time:101842ms step_avg:48.47ms +[2025-09-05 17:51:52] [Rank 0] step:2101/10000 train_time:101842ms step_avg:48.47ms +[2025-09-05 17:51:53] [Rank 0] step:2121/10000 train_time:102495ms step_avg:48.32ms +[2025-09-05 17:51:53] [Rank 0] step:2121/10000 train_time:102495ms step_avg:48.32ms +[2025-09-05 17:51:54] [Rank 0] step:2141/10000 train_time:103148ms step_avg:48.18ms +[2025-09-05 17:51:54] [Rank 0] step:2141/10000 train_time:103148ms step_avg:48.18ms +[2025-09-05 17:51:54] [Rank 0] step:2161/10000 train_time:103801ms step_avg:48.03ms +[2025-09-05 17:51:54] [Rank 0] step:2161/10000 train_time:103801ms step_avg:48.03ms +[2025-09-05 17:51:55] [Rank 0] step:2181/10000 train_time:104455ms step_avg:47.89ms +[2025-09-05 17:51:55] [Rank 0] step:2181/10000 train_time:104455ms step_avg:47.89ms +[2025-09-05 17:51:56] [Rank 0] step:2201/10000 train_time:105107ms step_avg:47.75ms +[2025-09-05 17:51:56] [Rank 0] step:2201/10000 train_time:105107ms step_avg:47.75ms +[2025-09-05 17:51:56] [Rank 0] step:2221/10000 train_time:105760ms step_avg:47.62ms +[2025-09-05 17:51:56] [Rank 0] step:2221/10000 train_time:105760ms step_avg:47.62ms +[2025-09-05 17:51:57] [Rank 0] step:2241/10000 train_time:106415ms step_avg:47.49ms +[2025-09-05 17:51:57] [Rank 0] step:2241/10000 train_time:106415ms step_avg:47.49ms +[2025-09-05 17:51:58] [Rank 0] step:2261/10000 train_time:107075ms step_avg:47.36ms +[2025-09-05 17:51:58] [Rank 0] step:2261/10000 train_time:107075ms step_avg:47.36ms +[2025-09-05 17:51:58] [Rank 0] step:2281/10000 train_time:107734ms step_avg:47.23ms +[2025-09-05 17:51:58] [Rank 0] step:2281/10000 train_time:107734ms step_avg:47.23ms +[2025-09-05 17:51:59] [Rank 0] step:2301/10000 train_time:108392ms step_avg:47.11ms +[2025-09-05 17:51:59] [Rank 0] step:2301/10000 train_time:108392ms step_avg:47.11ms +[2025-09-05 17:52:00] [Rank 0] step:2321/10000 train_time:109051ms step_avg:46.98ms +[2025-09-05 17:52:00] [Rank 0] step:2321/10000 train_time:109051ms step_avg:46.98ms +[2025-09-05 17:52:00] [Rank 0] step:2341/10000 train_time:109711ms step_avg:46.86ms +[2025-09-05 17:52:00] [Rank 0] step:2341/10000 train_time:109711ms step_avg:46.86ms +[2025-09-05 17:52:01] [Rank 0] step:2361/10000 train_time:110369ms step_avg:46.75ms +[2025-09-05 17:52:01] [Rank 0] step:2361/10000 train_time:110369ms step_avg:46.75ms +[2025-09-05 17:52:02] [Rank 0] step:2381/10000 train_time:111029ms step_avg:46.63ms +[2025-09-05 17:52:02] [Rank 0] step:2381/10000 train_time:111029ms step_avg:46.63ms +[2025-09-05 17:52:02] [Rank 0] step:2401/10000 train_time:111689ms step_avg:46.52ms +[2025-09-05 17:52:02] [Rank 0] step:2401/10000 train_time:111689ms step_avg:46.52ms +[2025-09-05 17:52:03] [Rank 0] step:2421/10000 train_time:112349ms step_avg:46.41ms +[2025-09-05 17:52:03] [Rank 0] step:2421/10000 train_time:112349ms step_avg:46.41ms +[2025-09-05 17:52:04] [Rank 0] step:2441/10000 train_time:113008ms step_avg:46.30ms +[2025-09-05 17:52:04] [Rank 0] step:2441/10000 train_time:113008ms step_avg:46.30ms +[2025-09-05 17:52:04] [Rank 0] step:2461/10000 train_time:113668ms step_avg:46.19ms +[2025-09-05 17:52:04] [Rank 0] step:2461/10000 train_time:113668ms step_avg:46.19ms +[2025-09-05 17:52:05] [Rank 0] step:2481/10000 train_time:114327ms step_avg:46.08ms +[2025-09-05 17:52:05] [Rank 0] step:2481/10000 train_time:114327ms 
step_avg:46.08ms +[2025-09-05 17:52:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:52:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:52:06] [Rank 0] PRINT: step:2500/10000 train_loss:0.7985 val_loss:0.7680 train_time:115220ms step_avg:46.09ms +[2025-09-05 17:52:06] [Rank 0] PRINT: step:2500/10000 train_loss:0.7985 val_loss:0.7680 train_time:115220ms step_avg:46.09ms +[2025-09-05 17:52:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:52:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:52:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:52:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:53:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:53:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:53:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:53:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:53:29] [Rank 0] Total Loss: 5.0807 +[2025-09-05 17:53:29] [Rank 0] Total Loss: 5.0807 +[2025-09-05 17:53:29] [Rank 0] Total FTA (Unweighted): 0.7162 +[2025-09-05 17:53:29] [Rank 0] Total FTA (Unweighted): 0.7162 +[2025-09-05 17:53:29] [Rank 0] Total FTA (Weighted): 0.7163 +[2025-09-05 17:53:29] [Rank 0] Total FTA (Weighted): 0.7163 +[2025-09-05 17:53:29] [Rank 0] Group 0 Loss: 5.1179 +[2025-09-05 17:53:29] [Rank 0] Group 0 Loss: 5.1179 +[2025-09-05 17:53:29] [Rank 0] Group 1 Loss: 4.6076 +[2025-09-05 17:53:29] [Rank 0] Group 1 Loss: 4.6076 +[2025-09-05 17:53:29] [Rank 0] Group 2 Loss: 4.5152 +[2025-09-05 17:53:29] [Rank 0] Group 2 Loss: 4.5152 +[2025-09-05 17:53:29] [Rank 0] Group 3 Loss: 4.9006 +[2025-09-05 17:53:29] [Rank 0] Group 3 Loss: 4.9006 +[2025-09-05 17:53:29] [Rank 0] Group 4 Loss: 4.7693 +[2025-09-05 17:53:29] [Rank 0] Group 4 Loss: 4.7693 +[2025-09-05 17:53:29] [Rank 0] Group 5 Loss: 4.8919 +[2025-09-05 17:53:29] [Rank 0] Group 5 Loss: 4.8919 +[2025-09-05 17:53:29] [Rank 0] Group 6 Loss: 4.8154 +[2025-09-05 17:53:29] [Rank 0] Group 6 Loss: 4.8154 +[2025-09-05 17:53:29] [Rank 0] Group 7 Loss: 4.8840 +[2025-09-05 17:53:29] [Rank 0] Group 7 Loss: 4.8840 +[2025-09-05 17:53:29] [Rank 0] Group 8 Loss: 5.0660 +[2025-09-05 17:53:29] [Rank 0] Group 8 Loss: 5.0660 +[2025-09-05 17:53:29] [Rank 0] Group 9 Loss: 5.0379 +[2025-09-05 17:53:29] [Rank 0] Group 9 Loss: 5.0379 +[2025-09-05 17:53:29] [Rank 0] Group 10 Loss: 5.2303 +[2025-09-05 17:53:29] [Rank 0] Group 10 Loss: 5.2303 +[2025-09-05 17:53:29] [Rank 0] Group 11 Loss: 5.3240 +[2025-09-05 17:53:29] [Rank 0] Group 11 Loss: 5.3240 +[2025-09-05 17:53:29] [Rank 0] Group 12 Loss: 5.3496 +[2025-09-05 17:53:29] [Rank 0] Group 12 Loss: 5.3496 +[2025-09-05 17:53:29] [Rank 0] Group 13 Loss: 5.4200 +[2025-09-05 17:53:29] [Rank 0] Group 13 Loss: 5.4200 +[2025-09-05 17:53:29] [Rank 0] Group 14 Loss: 5.5809 +[2025-09-05 17:53:29] [Rank 0] Group 14 Loss: 5.5809 +[2025-09-05 17:53:29] [Rank 0] Group 15 Loss: 5.7814 +[2025-09-05 17:53:29] [Rank 0] Group 15 Loss: 5.7814 +[2025-09-05 17:53:29] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 2 FTA: 
1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 5 FTA: 0.9800 +[2025-09-05 17:53:29] [Rank 0] Group 5 FTA: 0.9800 +[2025-09-05 17:53:29] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 17:53:29] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 17:53:29] [Rank 0] Group 8 FTA: 0.9900 +[2025-09-05 17:53:29] [Rank 0] Group 9 FTA: 0.9600 +[2025-09-05 17:53:29] [Rank 0] Group 9 FTA: 0.9600 +[2025-09-05 17:53:29] [Rank 0] Group 10 FTA: 0.7700 +[2025-09-05 17:53:29] [Rank 0] Group 10 FTA: 0.7700 +[2025-09-05 17:53:29] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 17:53:29] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 17:53:29] [Rank 0] Group 12 FTA: 0.1200 +[2025-09-05 17:53:29] [Rank 0] Group 12 FTA: 0.1200 +[2025-09-05 17:53:29] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 17:53:29] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 17:53:29] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 17:53:29] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 17:53:29] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:53:29] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:53:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png +[2025-09-05 17:53:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png +[2025-09-05 17:53:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png +[2025-09-05 17:53:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png +[2025-09-05 17:53:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png +[2025-09-05 17:53:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png +[2025-09-05 17:53:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png +[2025-09-05 17:53:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png +[2025-09-05 17:53:31] [Rank 0] step:2501/10000 train_time:115229ms step_avg:46.07ms +[2025-09-05 17:53:31] [Rank 0] step:2501/10000 train_time:115229ms step_avg:46.07ms +[2025-09-05 17:53:31] [Rank 0] step:2521/10000 train_time:115727ms step_avg:45.91ms +[2025-09-05 17:53:31] [Rank 0] step:2521/10000 train_time:115727ms step_avg:45.91ms +[2025-09-05 17:53:32] [Rank 0] step:2541/10000 train_time:116386ms step_avg:45.80ms +[2025-09-05 17:53:32] [Rank 0] step:2541/10000 train_time:116386ms step_avg:45.80ms +[2025-09-05 17:53:33] [Rank 0] step:2561/10000 train_time:117044ms step_avg:45.70ms +[2025-09-05 17:53:33] [Rank 0] step:2561/10000 train_time:117044ms step_avg:45.70ms +[2025-09-05 17:53:33] [Rank 0] step:2581/10000 train_time:117703ms step_avg:45.60ms +[2025-09-05 17:53:33] [Rank 0] 
step:2581/10000 train_time:117703ms step_avg:45.60ms +[2025-09-05 17:53:34] [Rank 0] step:2601/10000 train_time:118557ms step_avg:45.58ms +[2025-09-05 17:53:34] [Rank 0] step:2601/10000 train_time:118557ms step_avg:45.58ms +[2025-09-05 17:53:35] [Rank 0] step:2621/10000 train_time:119216ms step_avg:45.49ms +[2025-09-05 17:53:35] [Rank 0] step:2621/10000 train_time:119216ms step_avg:45.49ms +[2025-09-05 17:53:35] [Rank 0] step:2641/10000 train_time:119873ms step_avg:45.39ms +[2025-09-05 17:53:35] [Rank 0] step:2641/10000 train_time:119873ms step_avg:45.39ms +[2025-09-05 17:53:36] [Rank 0] step:2661/10000 train_time:120532ms step_avg:45.30ms +[2025-09-05 17:53:36] [Rank 0] step:2661/10000 train_time:120532ms step_avg:45.30ms +[2025-09-05 17:53:37] [Rank 0] step:2681/10000 train_time:121190ms step_avg:45.20ms +[2025-09-05 17:53:37] [Rank 0] step:2681/10000 train_time:121190ms step_avg:45.20ms +[2025-09-05 17:53:37] [Rank 0] step:2701/10000 train_time:121850ms step_avg:45.11ms +[2025-09-05 17:53:37] [Rank 0] step:2701/10000 train_time:121850ms step_avg:45.11ms +[2025-09-05 17:53:38] [Rank 0] step:2721/10000 train_time:122508ms step_avg:45.02ms +[2025-09-05 17:53:38] [Rank 0] step:2721/10000 train_time:122508ms step_avg:45.02ms +[2025-09-05 17:53:39] [Rank 0] step:2741/10000 train_time:123168ms step_avg:44.94ms +[2025-09-05 17:53:39] [Rank 0] step:2741/10000 train_time:123168ms step_avg:44.94ms +[2025-09-05 17:53:39] [Rank 0] step:2761/10000 train_time:123828ms step_avg:44.85ms +[2025-09-05 17:53:39] [Rank 0] step:2761/10000 train_time:123828ms step_avg:44.85ms +[2025-09-05 17:53:40] [Rank 0] step:2781/10000 train_time:124485ms step_avg:44.76ms +[2025-09-05 17:53:40] [Rank 0] step:2781/10000 train_time:124485ms step_avg:44.76ms +[2025-09-05 17:53:41] [Rank 0] step:2801/10000 train_time:125143ms step_avg:44.68ms +[2025-09-05 17:53:41] [Rank 0] step:2801/10000 train_time:125143ms step_avg:44.68ms +[2025-09-05 17:53:42] [Rank 0] step:2821/10000 train_time:126434ms step_avg:44.82ms +[2025-09-05 17:53:42] [Rank 0] step:2821/10000 train_time:126434ms step_avg:44.82ms +[2025-09-05 17:53:43] [Rank 0] step:2841/10000 train_time:126931ms step_avg:44.68ms +[2025-09-05 17:53:43] [Rank 0] step:2841/10000 train_time:126931ms step_avg:44.68ms +[2025-09-05 17:53:43] [Rank 0] step:2861/10000 train_time:127590ms step_avg:44.60ms +[2025-09-05 17:53:43] [Rank 0] step:2861/10000 train_time:127590ms step_avg:44.60ms +[2025-09-05 17:53:44] [Rank 0] step:2881/10000 train_time:128249ms step_avg:44.52ms +[2025-09-05 17:53:44] [Rank 0] step:2881/10000 train_time:128249ms step_avg:44.52ms +[2025-09-05 17:53:45] [Rank 0] step:2901/10000 train_time:128909ms step_avg:44.44ms +[2025-09-05 17:53:45] [Rank 0] step:2901/10000 train_time:128909ms step_avg:44.44ms +[2025-09-05 17:53:45] [Rank 0] step:2921/10000 train_time:129568ms step_avg:44.36ms +[2025-09-05 17:53:45] [Rank 0] step:2921/10000 train_time:129568ms step_avg:44.36ms +[2025-09-05 17:53:46] [Rank 0] step:2941/10000 train_time:130226ms step_avg:44.28ms +[2025-09-05 17:53:46] [Rank 0] step:2941/10000 train_time:130226ms step_avg:44.28ms +[2025-09-05 17:53:46] [Rank 0] step:2961/10000 train_time:130886ms step_avg:44.20ms +[2025-09-05 17:53:46] [Rank 0] step:2961/10000 train_time:130886ms step_avg:44.20ms +[2025-09-05 17:53:47] [Rank 0] step:2981/10000 train_time:131542ms step_avg:44.13ms +[2025-09-05 17:53:47] [Rank 0] step:2981/10000 train_time:131542ms step_avg:44.13ms +[2025-09-05 17:53:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by 
val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:53:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:53:48] [Rank 0] PRINT: step:3000/10000 train_loss:0.7669 val_loss:0.7447 train_time:132435ms step_avg:44.14ms +[2025-09-05 17:53:48] [Rank 0] PRINT: step:3000/10000 train_loss:0.7669 val_loss:0.7447 train_time:132435ms step_avg:44.14ms +[2025-09-05 17:53:48] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:53:48] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:53:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:53:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:55:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:55:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:55:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:55:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:55:11] [Rank 0] Total Loss: 5.1360 +[2025-09-05 17:55:11] [Rank 0] Total Loss: 5.1360 +[2025-09-05 17:55:11] [Rank 0] Total FTA (Unweighted): 0.7387 +[2025-09-05 17:55:11] [Rank 0] Total FTA (Unweighted): 0.7387 +[2025-09-05 17:55:11] [Rank 0] Total FTA (Weighted): 0.7388 +[2025-09-05 17:55:11] [Rank 0] Total FTA (Weighted): 0.7388 +[2025-09-05 17:55:11] [Rank 0] Group 0 Loss: 4.8246 +[2025-09-05 17:55:11] [Rank 0] Group 0 Loss: 4.8246 +[2025-09-05 17:55:11] [Rank 0] Group 1 Loss: 4.6441 +[2025-09-05 17:55:11] [Rank 0] Group 1 Loss: 4.6441 +[2025-09-05 17:55:11] [Rank 0] Group 2 Loss: 4.6545 +[2025-09-05 17:55:11] [Rank 0] Group 2 Loss: 4.6545 +[2025-09-05 17:55:11] [Rank 0] Group 3 Loss: 4.9056 +[2025-09-05 17:55:11] [Rank 0] Group 3 Loss: 4.9056 +[2025-09-05 17:55:11] [Rank 0] Group 4 Loss: 4.8969 +[2025-09-05 17:55:11] [Rank 0] Group 4 Loss: 4.8969 +[2025-09-05 17:55:11] [Rank 0] Group 5 Loss: 4.9565 +[2025-09-05 17:55:11] [Rank 0] Group 5 Loss: 4.9565 +[2025-09-05 17:55:11] [Rank 0] Group 6 Loss: 4.9379 +[2025-09-05 17:55:11] [Rank 0] Group 6 Loss: 4.9379 +[2025-09-05 17:55:11] [Rank 0] Group 7 Loss: 4.9904 +[2025-09-05 17:55:11] [Rank 0] Group 7 Loss: 4.9904 +[2025-09-05 17:55:11] [Rank 0] Group 8 Loss: 5.1489 +[2025-09-05 17:55:11] [Rank 0] Group 8 Loss: 5.1489 +[2025-09-05 17:55:11] [Rank 0] Group 9 Loss: 5.1616 +[2025-09-05 17:55:11] [Rank 0] Group 9 Loss: 5.1616 +[2025-09-05 17:55:11] [Rank 0] Group 10 Loss: 5.2733 +[2025-09-05 17:55:11] [Rank 0] Group 10 Loss: 5.2733 +[2025-09-05 17:55:11] [Rank 0] Group 11 Loss: 5.3602 +[2025-09-05 17:55:11] [Rank 0] Group 11 Loss: 5.3602 +[2025-09-05 17:55:11] [Rank 0] Group 12 Loss: 5.4552 +[2025-09-05 17:55:11] [Rank 0] Group 12 Loss: 5.4552 +[2025-09-05 17:55:11] [Rank 0] Group 13 Loss: 5.5621 +[2025-09-05 17:55:11] [Rank 0] Group 13 Loss: 5.5621 +[2025-09-05 17:55:11] [Rank 0] Group 14 Loss: 5.5947 +[2025-09-05 17:55:11] [Rank 0] Group 14 Loss: 5.5947 +[2025-09-05 17:55:11] [Rank 0] Group 15 Loss: 5.8102 +[2025-09-05 17:55:11] [Rank 0] Group 15 Loss: 5.8102 +[2025-09-05 17:55:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:55:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:55:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:55:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:55:11] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:55:11] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:55:11] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:55:11] [Rank 0] Group 3 FTA: 1.0000 
+[2025-09-05 17:55:11] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:55:11] [Rank 0] Group 5 FTA: 0.9700
+[2025-09-05 17:55:11] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:55:11] [Rank 0] Group 7 FTA: 0.9800
+[2025-09-05 17:55:11] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-05 17:55:11] [Rank 0] Group 9 FTA: 0.9300
+[2025-09-05 17:55:11] [Rank 0] Group 10 FTA: 0.9100
+[2025-09-05 17:55:11] [Rank 0] Group 11 FTA: 0.4700
+[2025-09-05 17:55:11] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 17:55:11] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 17:55:11] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:55:11] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:55:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 17:55:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 17:55:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 17:55:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 17:55:13] [Rank 0] step:3001/10000 train_time:132443ms step_avg:44.13ms
+[2025-09-05 17:55:13] [Rank 0] step:3021/10000 train_time:132885ms step_avg:43.99ms
+[2025-09-05 17:55:14] [Rank 0] step:3041/10000 train_time:133545ms step_avg:43.91ms
+[2025-09-05 17:55:15] [Rank 0] step:3061/10000 train_time:134204ms step_avg:43.84ms
+[2025-09-05 17:55:15] [Rank 0] step:3081/10000 train_time:134862ms step_avg:43.77ms
+[2025-09-05 17:55:16] [Rank 0] step:3101/10000 train_time:135520ms step_avg:43.70ms
+[2025-09-05 17:55:17] [Rank 0] step:3121/10000 train_time:136179ms step_avg:43.63ms
+[2025-09-05 17:55:17] [Rank 0] step:3141/10000 train_time:136837ms step_avg:43.56ms
+[2025-09-05 17:55:18] [Rank 0] step:3161/10000 train_time:137496ms step_avg:43.50ms
+[2025-09-05 17:55:19] [Rank 0] step:3181/10000 train_time:138155ms step_avg:43.43ms
+[2025-09-05 17:55:19] [Rank 0] step:3201/10000 train_time:138813ms step_avg:43.37ms
+[2025-09-05 17:55:20] [Rank 0] step:3221/10000 train_time:139472ms step_avg:43.30ms
+[2025-09-05 17:55:21] [Rank 0] step:3241/10000 train_time:140130ms step_avg:43.24ms
+[2025-09-05 17:55:21] [Rank 0] step:3261/10000 train_time:140789ms step_avg:43.17ms
+[2025-09-05 17:55:22] [Rank 0] step:3281/10000 train_time:141448ms step_avg:43.11ms
+[2025-09-05 17:55:23] [Rank 0] step:3301/10000 train_time:142107ms step_avg:43.05ms
+[2025-09-05 17:55:23] [Rank 0] step:3321/10000 train_time:142765ms step_avg:42.99ms
+[2025-09-05 17:55:24] [Rank 0] step:3341/10000 train_time:143424ms step_avg:42.93ms
+[2025-09-05 17:55:25] [Rank 0] step:3361/10000 train_time:144083ms step_avg:42.87ms
+[2025-09-05 17:55:25] [Rank 0] step:3381/10000 train_time:144741ms step_avg:42.81ms
+[2025-09-05 17:55:26] [Rank 0] step:3401/10000 train_time:145400ms step_avg:42.75ms
+[2025-09-05 17:55:27] [Rank 0] step:3421/10000 train_time:146059ms step_avg:42.69ms
+[2025-09-05 17:55:27] [Rank 0] step:3441/10000 train_time:146717ms step_avg:42.64ms
+[2025-09-05 17:55:28] [Rank 0] step:3461/10000 train_time:147377ms step_avg:42.58ms
+[2025-09-05 17:55:29] [Rank 0] step:3481/10000 train_time:148035ms step_avg:42.53ms
+[2025-09-05 17:55:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:55:30] [Rank 0] PRINT: step:3500/10000 train_loss:0.7443 val_loss:0.7255 train_time:148928ms step_avg:42.55ms
+[2025-09-05 17:55:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:55:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:56:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:56:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:56:52] [Rank 0] Total Loss: 5.0864
+[2025-09-05 17:56:52] [Rank 0] Total FTA (Unweighted): 0.7631
+[2025-09-05 17:56:52] [Rank 0] Total FTA (Weighted): 0.7631
+[2025-09-05 17:56:52] [Rank 0] Group 0 Loss: 5.0034
+[2025-09-05 17:56:52] [Rank 0] Group 1 Loss: 4.5885
+[2025-09-05 17:56:52] [Rank 0] Group 2 Loss: 4.5038
+[2025-09-05 17:56:52] [Rank 0] Group 3 Loss: 4.9140
+[2025-09-05 17:56:53] [Rank 0] Group 4 Loss: 4.9152
+[2025-09-05 17:56:53] [Rank 0] Group 5 Loss: 4.8828
+[2025-09-05 17:56:53] [Rank 0] Group 6 Loss: 4.8492
+[2025-09-05 17:56:53] [Rank 0] Group 7 Loss: 4.9448
+[2025-09-05 17:56:53] [Rank 0] Group 8 Loss: 5.1017
+[2025-09-05 17:56:53] [Rank 0] Group 9 Loss: 5.0661
+[2025-09-05 17:56:53] [Rank 0] Group 10 Loss: 5.2060
+[2025-09-05 17:56:53] [Rank 0] Group 11 Loss: 5.2695
+[2025-09-05 17:56:53] [Rank 0] Group 12 Loss: 5.4082
+[2025-09-05 17:56:53] [Rank 0] Group 13 Loss: 5.4866
+[2025-09-05 17:56:53] [Rank 0] Group 14 Loss: 5.5437
+[2025-09-05 17:56:53] [Rank 0] Group 15 Loss: 5.6984
+[2025-09-05 17:56:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 17:56:53] [Rank 0] Group 9 FTA: 0.9800
+[2025-09-05 17:56:53] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 17:56:53] [Rank 0] Group 11 FTA: 0.6700
+[2025-09-05 17:56:53] [Rank 0] Group 12 FTA: 0.2300
+[2025-09-05 17:56:53] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 17:56:53] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 17:56:53] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:56:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 17:56:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 17:56:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 17:56:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 17:56:54] [Rank 0] step:3501/10000 train_time:148936ms step_avg:42.54ms
+[2025-09-05 17:56:55] [Rank 0] step:3521/10000 train_time:149382ms step_avg:42.43ms
+[2025-09-05 17:56:55] [Rank 0] step:3541/10000 train_time:150039ms step_avg:42.37ms
+[2025-09-05 17:56:56] [Rank 0] step:3561/10000 train_time:150696ms step_avg:42.32ms
+[2025-09-05 17:56:57] [Rank 0] step:3581/10000 train_time:151357ms step_avg:42.27ms
+[2025-09-05 17:56:57] [Rank 0] step:3601/10000 train_time:152012ms step_avg:42.21ms
+[2025-09-05 17:56:58] [Rank 0] step:3621/10000 train_time:152670ms step_avg:42.16ms
+[2025-09-05 17:56:59] [Rank 0] step:3641/10000 train_time:153397ms step_avg:42.13ms
+[2025-09-05 17:56:59] [Rank 0] step:3661/10000 train_time:154056ms step_avg:42.08ms
+[2025-09-05 17:57:00] [Rank 0] step:3681/10000 train_time:154713ms step_avg:42.03ms
+[2025-09-05 17:57:01] [Rank 0] step:3701/10000 train_time:155371ms step_avg:41.98ms
+[2025-09-05 17:57:01] [Rank 0] step:3721/10000 train_time:156031ms step_avg:41.93ms
+[2025-09-05 17:57:02] [Rank 0] step:3741/10000 train_time:156690ms step_avg:41.88ms
+[2025-09-05 17:57:03] [Rank 0] step:3761/10000 train_time:157348ms step_avg:41.84ms
+[2025-09-05 17:57:03] [Rank 0] step:3781/10000 train_time:158007ms step_avg:41.79ms
+[2025-09-05 17:57:04] [Rank 0] step:3801/10000 train_time:158665ms step_avg:41.74ms
+[2025-09-05 17:57:05] [Rank 0] step:3821/10000 train_time:159323ms step_avg:41.70ms
+[2025-09-05 17:57:05] [Rank 0] step:3841/10000 train_time:159981ms step_avg:41.65ms
+[2025-09-05 17:57:06] [Rank 0] step:3861/10000 train_time:160639ms step_avg:41.61ms
+[2025-09-05 17:57:07] [Rank 0] step:3881/10000 train_time:161298ms step_avg:41.56ms
+[2025-09-05 17:57:07] [Rank 0] step:3901/10000 train_time:161956ms step_avg:41.52ms
+[2025-09-05 17:57:08] [Rank 0] step:3921/10000 train_time:162614ms step_avg:41.47ms
+[2025-09-05 17:57:09] [Rank 0] step:3941/10000 train_time:163273ms step_avg:41.43ms
+[2025-09-05 17:57:09] [Rank 0] step:3961/10000 train_time:163932ms step_avg:41.39ms
+[2025-09-05 17:57:10] [Rank 0] step:3981/10000 train_time:164591ms step_avg:41.34ms
+[2025-09-05 17:57:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:57:11] [Rank 0] PRINT: step:4000/10000 train_loss:0.7274 val_loss:0.7122 train_time:165483ms step_avg:41.37ms
+[2025-09-05 17:57:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:57:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:58:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:58:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:58:34] [Rank 0] Total Loss: 5.0672
+[2025-09-05 17:58:34] [Rank 0] Total FTA (Unweighted): 0.7856
+[2025-09-05 17:58:34] [Rank 0] Total FTA (Weighted): 0.7856
+[2025-09-05 17:58:34] [Rank 0] Group 0 Loss: 4.9169
+[2025-09-05 17:58:34] [Rank 0] Group 1 Loss: 4.5479
+[2025-09-05 17:58:34] [Rank 0] Group 2 Loss: 4.5285
+[2025-09-05 17:58:34] [Rank 0] Group 3 Loss: 4.9303
+[2025-09-05 17:58:34] [Rank 0] Group 4 Loss: 4.8612
+[2025-09-05 17:58:34] [Rank 0] Group 5 Loss: 4.8561
+[2025-09-05 17:58:34] [Rank 0] Group 6 Loss: 4.9043
+[2025-09-05 17:58:34] [Rank 0] Group 7 Loss: 4.9219
+[2025-09-05 17:58:34] [Rank 0] Group 8 Loss: 5.0946
+[2025-09-05 17:58:34] [Rank 0] Group 9 Loss: 5.0265
+[2025-09-05 17:58:34] [Rank 0] Group 10 Loss: 5.1765
+[2025-09-05 17:58:34] [Rank 0] Group 11 Loss: 5.2212
+[2025-09-05 17:58:34] [Rank 0] Group 12 Loss: 5.3877
+[2025-09-05 17:58:34] [Rank 0] Group 13 Loss: 5.4957
+[2025-09-05 17:58:34] [Rank 0] Group 14 Loss: 5.5290
+[2025-09-05 17:58:34] [Rank 0] Group 15 Loss: 5.6761
+[2025-09-05 17:58:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 9 FTA: 0.9900
+[2025-09-05 17:58:34] [Rank 0] Group 10 FTA: 0.9500
+[2025-09-05 17:58:34] [Rank 0] Group 11 FTA: 0.8900
+[2025-09-05 17:58:34] [Rank 0] Group 12 FTA: 0.2700
+[2025-09-05 17:58:34] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 17:58:34] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 17:58:34] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 17:58:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 17:58:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 17:58:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 17:58:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 17:58:35] [Rank 0] step:4001/10000 train_time:165491ms step_avg:41.36ms
+[2025-09-05 17:58:36] [Rank 0] step:4021/10000 train_time:166039ms step_avg:41.29ms
+[2025-09-05 17:58:37] [Rank 0] step:4041/10000 train_time:166697ms step_avg:41.25ms
+[2025-09-05 17:58:37] [Rank 0] step:4061/10000 train_time:167356ms step_avg:41.21ms
+[2025-09-05 17:58:38] [Rank 0] step:4081/10000 train_time:168015ms step_avg:41.17ms
+[2025-09-05 17:58:39] [Rank 0] step:4101/10000 train_time:168675ms step_avg:41.13ms
+[2025-09-05 17:58:39] [Rank 0] step:4121/10000 train_time:169334ms step_avg:41.09ms
+[2025-09-05 17:58:40] [Rank 0] step:4141/10000 train_time:169992ms step_avg:41.05ms
+[2025-09-05 17:58:41] [Rank 0] step:4161/10000 train_time:170650ms step_avg:41.01ms
+[2025-09-05 17:58:41] [Rank 0] step:4181/10000 train_time:171308ms step_avg:40.97ms
+[2025-09-05 17:58:42] [Rank 0] step:4201/10000 train_time:171966ms step_avg:40.93ms
+[2025-09-05 17:58:43] [Rank 0] step:4221/10000 train_time:172624ms step_avg:40.90ms
+[2025-09-05 17:58:43] [Rank 0] step:4241/10000 train_time:173285ms step_avg:40.86ms
+[2025-09-05 17:58:44] [Rank 0] step:4261/10000 train_time:173941ms step_avg:40.82ms
+[2025-09-05 17:58:45] [Rank 0] step:4281/10000 train_time:174599ms step_avg:40.78ms
+[2025-09-05 17:58:45] [Rank 0] step:4301/10000 train_time:175257ms step_avg:40.75ms
+[2025-09-05 17:58:46] [Rank 0] step:4321/10000 train_time:175915ms step_avg:40.71ms
+[2025-09-05 17:58:47] [Rank 0] step:4341/10000 train_time:176574ms step_avg:40.68ms
+[2025-09-05 17:58:47] [Rank 0] step:4361/10000 train_time:177232ms step_avg:40.64ms
+[2025-09-05 17:58:48] [Rank 0] step:4381/10000 train_time:178073ms step_avg:40.65ms
+[2025-09-05 17:58:49] [Rank 0] step:4401/10000 train_time:178731ms step_avg:40.61ms
+[2025-09-05 17:58:49] [Rank 0] step:4421/10000 train_time:179390ms step_avg:40.58ms
+[2025-09-05 17:58:50] [Rank 0] step:4441/10000 train_time:180048ms step_avg:40.54ms
+[2025-09-05 17:58:51] [Rank 0] step:4461/10000 train_time:180881ms step_avg:40.55ms
+[2025-09-05 17:58:52] [Rank 0] step:4481/10000 train_time:181541ms step_avg:40.51ms
+[2025-09-05 17:58:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:58:53] [Rank 0] PRINT: step:4500/10000 train_loss:0.7145 val_loss:0.7008 train_time:182433ms step_avg:40.54ms
+[2025-09-05 17:58:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:58:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:00:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:00:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:00:15] [Rank 0] Total Loss: 5.0802
+[2025-09-05 18:00:15] [Rank 0] Total FTA (Unweighted): 0.7937
+[2025-09-05 18:00:15] [Rank 0] Total FTA (Weighted): 0.7937
+[2025-09-05 18:00:15] [Rank 0] Group 0 Loss: 5.0887
+[2025-09-05 18:00:15] [Rank 0] Group 1 Loss: 4.5821
+[2025-09-05 18:00:15] [Rank 0] Group 2 Loss: 4.6165
+[2025-09-05 18:00:15] [Rank 0] Group 3 Loss: 4.9310
+[2025-09-05 18:00:15] [Rank 0] Group 4 Loss: 4.8620
+[2025-09-05 18:00:15] [Rank 0] Group 5 Loss: 4.9287
+[2025-09-05 18:00:15] [Rank 0] Group 6 Loss: 4.9462
+[2025-09-05 18:00:15] [Rank 0] Group 7 Loss: 5.0268
+[2025-09-05 18:00:15] [Rank 0] Group 8 Loss: 5.1008
+[2025-09-05 18:00:15] [Rank 0] Group 9 Loss: 5.0858
+[2025-09-05 18:00:15] [Rank 0] Group 10 Loss: 5.2413
+[2025-09-05 18:00:15] [Rank 0] Group 11 Loss: 5.2056
+[2025-09-05 18:00:15] [Rank 0] Group 12 Loss: 5.3468
+[2025-09-05 18:00:15] [Rank 0] Group 13 Loss: 5.3676
+[2025-09-05 18:00:15] [Rank 0] Group 14 Loss: 5.4304
+[2025-09-05 18:00:15] [Rank 0] Group 15 Loss: 5.5230
+[2025-09-05 18:00:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 7 FTA: 0.9900
+[2025-09-05 18:00:15] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:00:15] [Rank 0] Group 10 FTA: 0.9700
+[2025-09-05 18:00:15] [Rank 0] Group 11 FTA: 0.9100
+[2025-09-05 18:00:15] [Rank 0] Group 12 FTA: 0.4200
+[2025-09-05 18:00:15] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 18:00:15] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 18:00:15] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 18:00:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:00:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:00:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:00:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:00:16] [Rank 0] step:4501/10000 train_time:182441ms step_avg:40.53ms
+[2025-09-05 18:00:17] [Rank 0] step:4521/10000 train_time:182888ms step_avg:40.45ms
+[2025-09-05 18:00:18] [Rank 0] step:4541/10000 train_time:183546ms step_avg:40.42ms
+[2025-09-05 18:00:18] [Rank 0] step:4561/10000 train_time:184204ms step_avg:40.39ms
+[2025-09-05 18:00:19] [Rank 0] step:4581/10000 train_time:184863ms step_avg:40.35ms
+[2025-09-05 18:00:20] [Rank 0] step:4601/10000 train_time:185522ms step_avg:40.32ms
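The "val_tokens (491520) not perfectly divisible by val_batch_size (65536)" warning that precedes each validation pass is plain integer division: 491520 / 65536 = 7.5, so only 7 full batches are evaluated and the remaining 32768 tokens are skipped. A small sketch of that check (variable names are illustrative, not from the script):

    # Sketch of the divisibility check behind the recurring warning.
    val_tokens, val_batch_size = 491520, 65536
    full_batches, remainder = divmod(val_tokens, val_batch_size)
    assert (full_batches, remainder) == (7, 32768)
    if remainder != 0:
        # 32768 of the 491520 validation tokens are never evaluated
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")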
+[2025-09-05 18:00:20] [Rank 0] step:4621/10000 train_time:186181ms step_avg:40.29ms
+[2025-09-05 18:00:21] [Rank 0] step:4641/10000 train_time:186839ms step_avg:40.26ms
+[2025-09-05 18:00:22] [Rank 0] step:4661/10000 train_time:187497ms step_avg:40.23ms
+[2025-09-05 18:00:22] [Rank 0] step:4681/10000 train_time:188156ms step_avg:40.20ms
+[2025-09-05 18:00:23] [Rank 0] step:4701/10000 train_time:188815ms step_avg:40.16ms
+[2025-09-05 18:00:24] [Rank 0] step:4721/10000 train_time:189474ms step_avg:40.13ms
+[2025-09-05 18:00:24] [Rank 0] step:4741/10000 train_time:190132ms step_avg:40.10ms
+[2025-09-05 18:00:25] [Rank 0] step:4761/10000 train_time:190790ms step_avg:40.07ms
+[2025-09-05 18:00:26] [Rank 0] step:4781/10000 train_time:191449ms step_avg:40.04ms
+[2025-09-05 18:00:26] [Rank 0] step:4801/10000 train_time:192107ms step_avg:40.01ms
+[2025-09-05 18:00:27] [Rank 0] step:4821/10000 train_time:192766ms step_avg:39.98ms
+[2025-09-05 18:00:28] [Rank 0] step:4841/10000 train_time:193731ms step_avg:40.02ms
+[2025-09-05 18:00:29] [Rank 0] step:4861/10000 train_time:194390ms step_avg:39.99ms
+[2025-09-05 18:00:29] [Rank 0] step:4881/10000 train_time:195048ms step_avg:39.96ms
+[2025-09-05 18:00:30] [Rank 0] step:4901/10000 train_time:195706ms step_avg:39.93ms
+[2025-09-05 18:00:31] [Rank 0] step:4921/10000 train_time:196367ms step_avg:39.90ms
+[2025-09-05 18:00:31] [Rank 0] step:4941/10000 train_time:197028ms step_avg:39.88ms
+[2025-09-05 18:00:32] [Rank 0] step:4961/10000 train_time:197684ms step_avg:39.85ms
+[2025-09-05 18:00:33] [Rank 0] step:4981/10000 train_time:198343ms step_avg:39.82ms
+[2025-09-05 18:00:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:00:34] [Rank 0] PRINT: step:5000/10000 train_loss:0.7043 val_loss:0.6915 train_time:199236ms step_avg:39.85ms
+[2025-09-05 18:00:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:00:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:01:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:01:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:01:56] [Rank 0] Total Loss: 5.2567
+[2025-09-05 18:01:56] [Rank 0] Total FTA (Unweighted): 0.8050
+[2025-09-05 18:01:56] [Rank 0] Total FTA (Weighted): 0.8050
+[2025-09-05 18:01:56] [Rank 0] Group 0 Loss: 5.1419
+[2025-09-05 18:01:56] [Rank 0] Group 1 Loss: 4.8818
+[2025-09-05 18:01:56] [Rank 0] Group 2 Loss: 4.5694
+[2025-09-05 18:01:56] [Rank 0] Group 3 Loss: 5.1675
+[2025-09-05 18:01:56] [Rank 0] Group 4 Loss: 5.1353
+[2025-09-05 18:01:56] [Rank 0] Group 5 Loss: 5.1590
+[2025-09-05 18:01:56] [Rank 0] Group 6 Loss: 5.1163
+[2025-09-05 18:01:56] [Rank 0] Group 7 Loss: 5.1783
+[2025-09-05 18:01:56] [Rank 0] Group 8 Loss: 5.2534
+[2025-09-05 18:01:56] [Rank 0] Group 9 Loss: 5.2683
+[2025-09-05 18:01:56] [Rank 0] Group 10 Loss: 5.4094
+[2025-09-05 18:01:56] [Rank 0] Group 11 Loss: 5.4270
+[2025-09-05 18:01:56] [Rank 0] Group 12 Loss: 5.5105
+[2025-09-05 18:01:56] [Rank 0] Group 13 Loss: 5.6414
+[2025-09-05 18:01:56] [Rank 0] Group 14 Loss: 5.5860
+[2025-09-05 18:01:56] [Rank 0] Group 15 Loss: 5.6619
+[2025-09-05 18:01:56] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:01:56] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:01:56] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:01:56] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:01:56] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:01:56] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:01:56] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:01:57] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:01:57] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:01:57] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:01:57] [Rank 0] Group 10 FTA: 0.9700
+[2025-09-05 18:01:57] [Rank 0] Group 11 FTA: 0.9300
+[2025-09-05 18:01:57] [Rank 0] Group 12 FTA: 0.5500
+[2025-09-05 18:01:57] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 18:01:57] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 18:01:57] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 18:01:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:01:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:01:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:01:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:01:58] [Rank 0] step:5001/10000 train_time:199245ms step_avg:39.84ms
+[2025-09-05 18:01:59] [Rank 0] step:5021/10000 train_time:199698ms step_avg:39.77ms
+[2025-09-05 18:01:59] [Rank 0] step:5041/10000 train_time:200359ms step_avg:39.75ms
+[2025-09-05 18:02:00] [Rank 0] step:5061/10000 train_time:201179ms step_avg:39.75ms
+[2025-09-05 18:02:01] [Rank 0] step:5081/10000 train_time:201839ms step_avg:39.72ms
+[2025-09-05 18:02:01] [Rank 0] step:5101/10000 train_time:202499ms step_avg:39.70ms
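In every evaluation block here, "Total FTA (Unweighted)" and "Total FTA (Weighted)" coincide (0.8050 in the step-5000 block above). That is what one would expect if the 1600-sample fixed-eval set is split evenly across the 16 groups, 100 samples each, which is also consistent with every per-group FTA being a multiple of 0.01: with equal group sizes, a sample-weighted mean of per-group accuracies equals the plain mean. A sketch under that equal-size assumption:

    # Sketch: with equal group sizes, weighted and unweighted FTA agree.
    # Group FTAs are from the step-5000 block above; 100 samples per group
    # is an assumption consistent with the 1600-sample fixed-eval set.
    ftas = [1.0] * 10 + [0.97, 0.93, 0.55, 0.24, 0.13, 0.06]
    sizes = [100] * 16
    unweighted = sum(ftas) / len(ftas)
    weighted = sum(f * n for f, n in zip(ftas, sizes)) / sum(sizes)
    assert abs(unweighted - 0.8050) < 1e-9 and abs(weighted - unweighted) < 1e-9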
+[2025-09-05 18:02:02] [Rank 0] step:5121/10000 train_time:203162ms step_avg:39.67ms
+[2025-09-05 18:02:03] [Rank 0] step:5141/10000 train_time:203819ms step_avg:39.65ms
+[2025-09-05 18:02:03] [Rank 0] step:5161/10000 train_time:204478ms step_avg:39.62ms
+[2025-09-05 18:02:04] [Rank 0] step:5181/10000 train_time:205139ms step_avg:39.59ms
+[2025-09-05 18:02:05] [Rank 0] step:5201/10000 train_time:205799ms step_avg:39.57ms
+[2025-09-05 18:02:05] [Rank 0] step:5221/10000 train_time:206459ms step_avg:39.54ms
+[2025-09-05 18:02:06] [Rank 0] step:5241/10000 train_time:207119ms step_avg:39.52ms
+[2025-09-05 18:02:07] [Rank 0] step:5261/10000 train_time:207778ms step_avg:39.49ms
+[2025-09-05 18:02:07] [Rank 0] step:5281/10000 train_time:208438ms step_avg:39.47ms
+[2025-09-05 18:02:08] [Rank 0] step:5301/10000 train_time:209098ms step_avg:39.44ms
+[2025-09-05 18:02:09] [Rank 0] step:5321/10000 train_time:209757ms step_avg:39.42ms
+[2025-09-05 18:02:09] [Rank 0] step:5341/10000 train_time:210416ms step_avg:39.40ms
+[2025-09-05 18:02:10] [Rank 0] step:5361/10000 train_time:211076ms step_avg:39.37ms
+[2025-09-05 18:02:11] [Rank 0] step:5381/10000 train_time:211734ms step_avg:39.35ms
+[2025-09-05 18:02:11] [Rank 0] step:5401/10000 train_time:212393ms step_avg:39.32ms
+[2025-09-05 18:02:12] [Rank 0] step:5421/10000 train_time:213052ms step_avg:39.30ms
+[2025-09-05 18:02:13] [Rank 0] step:5441/10000 train_time:213712ms step_avg:39.28ms
+[2025-09-05 18:02:13] [Rank 0] step:5461/10000 train_time:214370ms step_avg:39.25ms
+[2025-09-05 18:02:14] [Rank 0] step:5481/10000 train_time:215030ms step_avg:39.23ms
+[2025-09-05 18:02:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:02:15] [Rank 0] PRINT: step:5500/10000 train_loss:0.6953 val_loss:0.6837 train_time:215923ms step_avg:39.26ms
+[2025-09-05 18:02:15] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:02:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:03:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:03:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:03:37] [Rank 0] Total Loss: 5.1671
+[2025-09-05 18:03:37] [Rank 0] Total FTA (Unweighted): 0.8213
+[2025-09-05 18:03:37] [Rank 0] Total FTA (Weighted): 0.8213
+[2025-09-05 18:03:37] [Rank 0] Group 0 Loss: 5.0853
+[2025-09-05 18:03:37] [Rank 0] Group 1 Loss: 4.7931
+[2025-09-05 18:03:37] [Rank 0] Group 2 Loss: 4.5821
+[2025-09-05 18:03:37] [Rank 0] Group 3 Loss: 5.0529
+[2025-09-05 18:03:37] [Rank 0] Group 4 Loss: 5.0065
+[2025-09-05 18:03:37] [Rank 0] Group 5 Loss: 5.0579
+[2025-09-05 18:03:37] [Rank 0] Group 6 Loss: 5.0362
+[2025-09-05 18:03:37] [Rank 0] Group 7 Loss: 5.1240
+[2025-09-05 18:03:37] [Rank 0] Group 8 Loss: 5.1789
+[2025-09-05 18:03:37] [Rank 0] Group 9 Loss: 5.1702
+[2025-09-05 18:03:37] [Rank 0] Group 10 Loss: 5.2809
+[2025-09-05 18:03:37] [Rank 0] Group 11 Loss: 5.2828
+[2025-09-05 18:03:37] [Rank 0] Group 12 Loss: 5.4001
+[2025-09-05 18:03:37] [Rank 0] Group 13 Loss: 5.4998
+[2025-09-05 18:03:37] [Rank 0] Group 14 Loss: 5.4959
+[2025-09-05 18:03:37] [Rank 0] Group 15 Loss: 5.6264
+[2025-09-05 18:03:37] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:03:37] [Rank 0] Group 9 FTA: 0.9800
+[2025-09-05 18:03:37] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 18:03:37] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 18:03:37] [Rank 0] Group 12 FTA: 0.6600
+[2025-09-05 18:03:37] [Rank 0] Group 13 FTA: 0.2800
+[2025-09-05 18:03:37] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 18:03:37] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 18:03:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:03:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:03:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:03:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:03:39] [Rank 0] step:5501/10000 train_time:215932ms step_avg:39.25ms
+[2025-09-05 18:03:40] [Rank 0] step:5521/10000 train_time:216384ms step_avg:39.19ms
+[2025-09-05 18:03:40] [Rank 0] step:5541/10000 train_time:217043ms step_avg:39.17ms
+[2025-09-05 18:03:41] [Rank 0] step:5561/10000 train_time:217702ms step_avg:39.15ms
+[2025-09-05 18:03:41] [Rank 0] step:5581/10000 train_time:218362ms step_avg:39.13ms
+[2025-09-05 18:03:42] [Rank 0] step:5601/10000 train_time:219022ms step_avg:39.10ms
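The "[✓] ... curve updated and saved" lines indicate the script redraws and overwrites the same four PNGs after every detailed evaluation. A hypothetical matplotlib sketch of that redraw-and-overwrite pattern (the data layout, function name, and `history` structure are assumptions, not the script's actual code):

    # Hypothetical sketch: redraw per-group loss curves after each evaluation.
    # `history` maps group id -> list of (step, loss) pairs parsed from the log.
    import matplotlib
    matplotlib.use("Agg")  # headless backend, as on a training node
    import matplotlib.pyplot as plt

    def update_per_class_loss_curves(history: dict, out_path: str) -> None:
        fig, ax = plt.subplots()
        for group, points in sorted(history.items()):
            steps, losses = zip(*points)
            ax.plot(steps, losses, label=f"Group {group}")
        ax.set_xlabel("step")
        ax.set_ylabel("loss")
        ax.legend(fontsize=6)
        fig.savefig(out_path)  # overwrites the previous snapshot of the curve
        plt.close(fig)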
+[2025-09-05 18:03:43] [Rank 0] step:5621/10000 train_time:219681ms step_avg:39.08ms
+[2025-09-05 18:03:44] [Rank 0] step:5641/10000 train_time:220468ms step_avg:39.08ms
+[2025-09-05 18:03:45] [Rank 0] step:5661/10000 train_time:221452ms step_avg:39.12ms
+[2025-09-05 18:03:45] [Rank 0] step:5681/10000 train_time:222112ms step_avg:39.10ms
+[2025-09-05 18:03:46] [Rank 0] step:5701/10000 train_time:222770ms step_avg:39.08ms
+[2025-09-05 18:03:47] [Rank 0] step:5721/10000 train_time:223431ms step_avg:39.05ms
+[2025-09-05 18:03:47] [Rank 0] step:5741/10000 train_time:224090ms step_avg:39.03ms
+[2025-09-05 18:03:48] [Rank 0] step:5761/10000 train_time:224749ms step_avg:39.01ms
+[2025-09-05 18:03:49] [Rank 0] step:5781/10000 train_time:225408ms step_avg:38.99ms
+[2025-09-05 18:03:49] [Rank 0] step:5801/10000 train_time:226068ms step_avg:38.97ms
+[2025-09-05 18:03:50] [Rank 0] step:5821/10000 train_time:226725ms step_avg:38.95ms
+[2025-09-05 18:03:51] [Rank 0] step:5841/10000 train_time:227382ms step_avg:38.93ms
+[2025-09-05 18:03:51] [Rank 0] step:5861/10000 train_time:228040ms step_avg:38.91ms
+[2025-09-05 18:03:52] [Rank 0] step:5881/10000 train_time:228700ms step_avg:38.89ms
+[2025-09-05 18:03:52] [Rank 0] step:5901/10000 train_time:229356ms step_avg:38.87ms
+[2025-09-05 18:03:53] [Rank 0] step:5921/10000 train_time:230015ms step_avg:38.85ms
+[2025-09-05 18:03:54] [Rank 0] step:5941/10000 train_time:230673ms step_avg:38.83ms
+[2025-09-05 18:03:54] [Rank 0] step:5961/10000 train_time:231332ms step_avg:38.81ms
+[2025-09-05 18:03:55] [Rank 0] step:5981/10000 train_time:231991ms step_avg:38.79ms
+[2025-09-05 18:03:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:03:56] [Rank 0] PRINT: step:6000/10000 train_loss:0.6878 val_loss:0.6765 train_time:232883ms step_avg:38.81ms
+[2025-09-05 18:03:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:03:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:05:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:05:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:05:18] [Rank 0] Total Loss: 5.1139
+[2025-09-05 18:05:18] [Rank 0] Total FTA (Unweighted): 0.8269
+[2025-09-05 18:05:18] [Rank 0] Total FTA (Weighted): 0.8269
+[2025-09-05 18:05:18] [Rank 0] Group 0 Loss: 5.0951
+[2025-09-05 18:05:18] [Rank 0] Group 1 Loss: 4.7409
+[2025-09-05 18:05:18] [Rank 0] Group 2 Loss: 4.5120
+[2025-09-05 18:05:18] [Rank 0] Group 3 Loss: 4.9919
+[2025-09-05 18:05:18] [Rank 0] Group 4 Loss: 4.9923
+[2025-09-05 18:05:18] [Rank 0] Group 5 Loss: 4.9938
+[2025-09-05 18:05:18] [Rank 0] Group 6 Loss: 4.9346
+[2025-09-05 18:05:18] [Rank 0] Group 7 Loss: 5.0418
+[2025-09-05 18:05:18] [Rank 0] Group 8 Loss: 5.1553
+[2025-09-05 18:05:18] [Rank 0] Group 9 Loss: 5.1139
+[2025-09-05 18:05:18] [Rank 0] Group 10 Loss: 5.2164
+[2025-09-05 18:05:18] [Rank 0] Group 11 Loss: 5.2217
+[2025-09-05 18:05:18] [Rank 0] Group 12 Loss: 5.3255
+[2025-09-05 18:05:18] [Rank 0] Group 13 Loss: 5.4289
+[2025-09-05 18:05:18] [Rank 0] Group 14 Loss: 5.4604
+[2025-09-05 18:05:18] [Rank 0] Group 15 Loss: 5.5985
+[2025-09-05 18:05:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:05:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:05:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:05:18] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:05:18] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:05:18] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:05:18] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:05:18] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:05:18] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-05 18:05:18] [Rank 0] Group 9 FTA: 0.9800
+[2025-09-05 18:05:18] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 18:05:18] [Rank 0] Group 11 FTA: 0.9400
+[2025-09-05 18:05:18] [Rank 0] Group 12 FTA: 0.7200
+[2025-09-05 18:05:18] [Rank 0] Group 13 FTA: 0.3500
+[2025-09-05 18:05:18] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 18:05:18] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 18:05:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:05:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:05:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:05:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:05:20] [Rank 0] step:6001/10000 train_time:232891ms step_avg:38.81ms
+[2025-09-05 18:05:21] [Rank 0] step:6021/10000 train_time:233789ms step_avg:38.83ms
+[2025-09-05 18:05:22] [Rank 0] step:6041/10000 train_time:234447ms step_avg:38.81ms
+[2025-09-05 18:05:22] [Rank 0] step:6061/10000 train_time:235105ms step_avg:38.79ms
+[2025-09-05 18:05:23] [Rank 0] step:6081/10000 train_time:235764ms step_avg:38.77ms
+[2025-09-05 18:05:23] [Rank 0] step:6101/10000 train_time:236423ms step_avg:38.75ms
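Since every record in this log follows the same "[timestamp] [Rank 0] ..." shape, the per-group numbers can be recovered from the raw file with a single regular expression. A small parsing sketch (the diff's leading "+" is stripped first; function and variable names are illustrative):

    # Sketch: pull per-group Loss/FTA records out of log text like the above.
    import re

    LINE = re.compile(
        r"^\[(?P<ts>[\d: -]+)\] \[Rank 0\] "
        r"Group (?P<group>\d+) (?P<metric>Loss|FTA): (?P<value>[\d.]+)$"
    )

    def parse_group_metrics(log_text: str):
        for raw in log_text.splitlines():
            line = raw.lstrip("+")  # drop the diff marker if present
            m = LINE.match(line)
            if m:
                yield int(m["group"]), m["metric"], float(m["value"])

    sample = "+[2025-09-05 18:05:18] [Rank 0] Group 12 FTA: 0.7200"
    assert list(parse_group_metrics(sample)) == [(12, "FTA", 0.72)]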
+[2025-09-05 18:05:24] [Rank 0] step:6121/10000 train_time:237086ms step_avg:38.73ms
+[2025-09-05 18:05:25] [Rank 0] step:6141/10000 train_time:237745ms step_avg:38.71ms
+[2025-09-05 18:05:25] [Rank 0] step:6161/10000 train_time:238403ms step_avg:38.70ms
+[2025-09-05 18:05:26] [Rank 0] step:6181/10000 train_time:239062ms step_avg:38.68ms
+[2025-09-05 18:05:27] [Rank 0] step:6201/10000 train_time:239721ms step_avg:38.66ms
+[2025-09-05 18:05:27] [Rank 0] step:6221/10000 train_time:240380ms step_avg:38.64ms
+[2025-09-05 18:05:28] [Rank 0] step:6241/10000 train_time:241039ms step_avg:38.62ms
+[2025-09-05 18:05:29] [Rank 0] step:6261/10000 train_time:241698ms step_avg:38.60ms
+[2025-09-05 18:05:29] [Rank 0] step:6281/10000 train_time:242357ms step_avg:38.59ms
+[2025-09-05 18:05:30] [Rank 0] step:6301/10000 train_time:243015ms step_avg:38.57ms
+[2025-09-05 18:05:31] [Rank 0] step:6321/10000 train_time:243675ms step_avg:38.55ms
+[2025-09-05 18:05:31] [Rank 0] step:6341/10000 train_time:244333ms step_avg:38.53ms
+[2025-09-05 18:05:32] [Rank 0] step:6361/10000 train_time:244992ms step_avg:38.51ms
+[2025-09-05 18:05:33] [Rank 0] step:6381/10000 train_time:245650ms step_avg:38.50ms
+[2025-09-05 18:05:33] [Rank 0] step:6401/10000 train_time:246308ms step_avg:38.48ms
+[2025-09-05 18:05:34] [Rank 0] step:6421/10000 train_time:246968ms step_avg:38.46ms
+[2025-09-05 18:05:35] [Rank 0] step:6441/10000 train_time:247625ms step_avg:38.45ms
+[2025-09-05 18:05:35] [Rank 0] step:6461/10000 train_time:248284ms step_avg:38.43ms
+[2025-09-05 18:05:36] [Rank 0] step:6481/10000 train_time:248942ms step_avg:38.41ms
+[2025-09-05 18:05:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:05:37] [Rank 0] PRINT: step:6500/10000 train_loss:0.6809 val_loss:0.6700 train_time:249836ms step_avg:38.44ms
+[2025-09-05 18:05:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:05:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:07:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:07:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:07:00] [Rank 0] Total Loss: 5.1721
+[2025-09-05 18:07:00] [Rank 0] Total FTA (Unweighted): 0.8444
+[2025-09-05 18:07:00] [Rank 0] Total FTA (Weighted): 0.8444
+[2025-09-05 18:07:00] [Rank 0] Group 0 Loss: 5.2772
+[2025-09-05 18:07:00] [Rank 0] Group 1 Loss: 4.7720
+[2025-09-05 18:07:00] [Rank 0] Group 2 Loss: 4.6462
+[2025-09-05 18:07:00] [Rank 0] Group 3 Loss: 5.0567
+[2025-09-05 18:07:00] [Rank 0] Group 4 Loss: 5.0858
+[2025-09-05 18:07:00] [Rank 0] Group 5 Loss: 5.0582
+[2025-09-05 18:07:00] [Rank 0] Group 6 Loss: 5.0185
+[2025-09-05 18:07:00] [Rank 0] Group 7 Loss: 5.1580
+[2025-09-05 18:07:00] [Rank 0] Group 8 Loss: 5.2379
+[2025-09-05 18:07:00] [Rank 0] Group 9 Loss: 5.1453
+[2025-09-05 18:07:00] [Rank 0] Group 10 Loss: 5.2907
+[2025-09-05 18:07:00] [Rank 0] Group 11 Loss: 5.2222
+[2025-09-05 18:07:00] [Rank 0] Group 12 Loss: 5.3523
+[2025-09-05 18:07:00] [Rank 0] Group 13 Loss: 5.4040
+[2025-09-05 18:07:00] [Rank 0] Group 14 Loss: 5.4372
+[2025-09-05 18:07:00] [Rank 0] Group 15 Loss: 5.5918
+[2025-09-05 18:07:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 8 FTA: 0.9800
+[2025-09-05 18:07:00] [Rank 0] Group 9 FTA: 1.0000
+[2025-09-05 18:07:00] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 18:07:00] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 18:07:00] [Rank 0] Group 12 FTA: 0.9000
+[2025-09-05 18:07:00] [Rank 0] Group 13 FTA: 0.4400
+[2025-09-05 18:07:00] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 18:07:00] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 18:07:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:07:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:07:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:07:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:07:02] [Rank 0] step:6501/10000 train_time:249844ms step_avg:38.43ms
+[2025-09-05 18:07:02] [Rank 0] step:6521/10000 train_time:250280ms step_avg:38.38ms
+[2025-09-05 18:07:03] [Rank 0] step:6541/10000 train_time:250937ms step_avg:38.36ms
+[2025-09-05 18:07:04] [Rank 0] step:6561/10000 train_time:251596ms step_avg:38.35ms
+[2025-09-05 18:07:04] [Rank 0] step:6581/10000 train_time:252256ms step_avg:38.33ms
+[2025-09-05 18:07:05] [Rank 0] step:6601/10000 train_time:252913ms step_avg:38.31ms
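The timestamps also expose what the detailed evaluation costs: the step-6500 pass above runs from 18:05:37 to 18:07:00 (~83 s), while 500 training steps at the marginal rate of ~34 ms/step take only ~17 s, so at this cadence most wall-clock time goes to evaluation rather than training. A quick check of that arithmetic, using only numbers visible in the log:

    # Rough wall-clock split for one 500-step segment, from timestamps above.
    # Eval at step 6500 runs 18:05:37 -> 18:07:00; marginal train speed is
    # estimated from train_time 249844 ms at step 6501 vs 265980 ms at 6981.
    eval_seconds = (18 * 3600 + 7 * 60) - (18 * 3600 + 5 * 60 + 37)  # 83 s
    per_step_ms = (265980 - 249844) / (6981 - 6501)                  # ~33.6 ms
    train_seconds = 500 * per_step_ms / 1000                         # ~16.8 s
    eval_share = eval_seconds / (eval_seconds + train_seconds)
    assert eval_seconds == 83 and eval_share > 0.8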
+[2025-09-05 18:07:06] [Rank 0] step:6621/10000 train_time:253572ms step_avg:38.30ms
+[2025-09-05 18:07:06] [Rank 0] step:6641/10000 train_time:254230ms step_avg:38.28ms
+[2025-09-05 18:07:07] [Rank 0] step:6661/10000 train_time:254889ms step_avg:38.27ms
+[2025-09-05 18:07:08] [Rank 0] step:6681/10000 train_time:255548ms step_avg:38.25ms
+[2025-09-05 18:07:08] [Rank 0] step:6701/10000 train_time:256207ms step_avg:38.23ms
+[2025-09-05 18:07:09] [Rank 0] step:6721/10000 train_time:256867ms step_avg:38.22ms
+[2025-09-05 18:07:09] [Rank 0] step:6741/10000 train_time:257525ms step_avg:38.20ms
+[2025-09-05 18:07:10] [Rank 0] step:6761/10000 train_time:258183ms step_avg:38.19ms
+[2025-09-05 18:07:11] [Rank 0] step:6781/10000 train_time:258842ms step_avg:38.17ms
+[2025-09-05 18:07:11] [Rank 0] step:6801/10000 train_time:259500ms step_avg:38.16ms
+[2025-09-05 18:07:12] [Rank 0] step:6821/10000 train_time:260158ms step_avg:38.14ms
+[2025-09-05 18:07:13] [Rank 0] step:6841/10000 train_time:261017ms step_avg:38.15ms
+[2025-09-05 18:07:14] [Rank 0] step:6861/10000 train_time:261871ms step_avg:38.17ms
+[2025-09-05 18:07:14] [Rank 0] step:6881/10000 train_time:262529ms step_avg:38.15ms
+[2025-09-05 18:07:15] [Rank 0] step:6901/10000 train_time:263187ms step_avg:38.14ms
+[2025-09-05 18:07:16] [Rank 0] step:6921/10000 train_time:264007ms step_avg:38.15ms
+[2025-09-05 18:07:17] [Rank 0] step:6941/10000 train_time:264664ms step_avg:38.13ms
+[2025-09-05 18:07:17] [Rank 0] step:6961/10000 train_time:265322ms step_avg:38.12ms
+[2025-09-05 18:07:18] [Rank 0] step:6981/10000 train_time:265980ms step_avg:38.10ms
+[2025-09-05 18:07:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:07:19] [Rank 0] PRINT: step:7000/10000 train_loss:0.6745 val_loss:0.6638 train_time:266872ms step_avg:38.12ms
+[2025-09-05 18:07:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:07:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:08:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:08:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:08:42] [Rank 0] Total Loss: 5.2071
+[2025-09-05 18:08:42] [Rank 0] Total FTA (Unweighted): 0.8550
+[2025-09-05 18:08:42] [Rank 0] Total FTA (Weighted): 0.8550
+[2025-09-05 18:08:42] [Rank 0] Group 0 Loss: 5.5044
+[2025-09-05 18:08:42] [Rank 0] Group 1 Loss: 4.8828
+[2025-09-05 18:08:42] [Rank 0] Group 2 Loss: 4.6173
+[2025-09-05 18:08:42] [Rank 0] Group 3 Loss: 5.0821
+[2025-09-05 18:08:42] [Rank 0] Group 4 Loss: 5.1051
+[2025-09-05 18:08:42] [Rank 0] Group 5 Loss: 5.1159
+[2025-09-05 18:08:42] [Rank 0] Group 6 Loss: 5.0400
+[2025-09-05 18:08:42] [Rank 0] Group 7 Loss: 5.1344
+[2025-09-05 18:08:42] [Rank 0] Group 8 Loss: 5.2293
+[2025-09-05 18:08:42] [Rank 0] Group 9 Loss: 5.1882
+[2025-09-05 18:08:42] [Rank 0] Group 10 Loss: 5.2917
+[2025-09-05 18:08:42] [Rank 0] Group 11 Loss: 5.2687
+[2025-09-05 18:08:42] [Rank 0] Group 12 Loss: 5.3839
+[2025-09-05 18:08:42] [Rank 0] Group 13 Loss: 5.4444
+[2025-09-05 18:08:42] [Rank 0] Group 14 Loss: 5.4607
+[2025-09-05 18:08:42] [Rank 0] Group 15 Loss: 5.5646
+[2025-09-05 18:08:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:08:42] [Rank 0] Group 9 FTA: 0.9700
+[2025-09-05 18:08:42] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 18:08:42] [Rank 0] Group 11 FTA: 0.9900
+[2025-09-05 18:08:42] [Rank 0] Group 12 FTA: 0.8700
+[2025-09-05 18:08:42] [Rank 0] Group 13 FTA: 0.5200
+[2025-09-05 18:08:42] [Rank 0] Group 14 FTA: 0.2300
+[2025-09-05 18:08:42] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 18:08:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:08:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:08:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:08:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:08:44] [Rank 0] step:7001/10000 train_time:266880ms step_avg:38.12ms
+[2025-09-05 18:08:44] [Rank 0] step:7021/10000 train_time:267335ms step_avg:38.08ms
+[2025-09-05 18:08:45] [Rank 0] step:7041/10000 train_time:267993ms step_avg:38.06ms
+[2025-09-05 18:08:46] [Rank 0] step:7061/10000 train_time:268653ms step_avg:38.05ms
+[2025-09-05 18:08:46] [Rank 0] step:7081/10000 train_time:269312ms step_avg:38.03ms
+[2025-09-05 18:08:47] [Rank 0] step:7101/10000 train_time:269972ms step_avg:38.02ms
[Rank 0] step:7101/10000 train_time:269972ms step_avg:38.02ms +[2025-09-05 18:08:48] [Rank 0] step:7121/10000 train_time:270631ms step_avg:38.00ms +[2025-09-05 18:08:48] [Rank 0] step:7121/10000 train_time:270631ms step_avg:38.00ms +[2025-09-05 18:08:48] [Rank 0] step:7141/10000 train_time:271290ms step_avg:37.99ms +[2025-09-05 18:08:48] [Rank 0] step:7141/10000 train_time:271290ms step_avg:37.99ms +[2025-09-05 18:08:49] [Rank 0] step:7161/10000 train_time:271949ms step_avg:37.98ms +[2025-09-05 18:08:49] [Rank 0] step:7161/10000 train_time:271949ms step_avg:37.98ms +[2025-09-05 18:08:50] [Rank 0] step:7181/10000 train_time:272609ms step_avg:37.96ms +[2025-09-05 18:08:50] [Rank 0] step:7181/10000 train_time:272609ms step_avg:37.96ms +[2025-09-05 18:08:50] [Rank 0] step:7201/10000 train_time:273268ms step_avg:37.95ms +[2025-09-05 18:08:50] [Rank 0] step:7201/10000 train_time:273268ms step_avg:37.95ms +[2025-09-05 18:08:51] [Rank 0] step:7221/10000 train_time:273927ms step_avg:37.93ms +[2025-09-05 18:08:51] [Rank 0] step:7221/10000 train_time:273927ms step_avg:37.93ms +[2025-09-05 18:08:51] [Rank 0] step:7241/10000 train_time:274588ms step_avg:37.92ms +[2025-09-05 18:08:51] [Rank 0] step:7241/10000 train_time:274588ms step_avg:37.92ms +[2025-09-05 18:08:52] [Rank 0] step:7261/10000 train_time:275247ms step_avg:37.91ms +[2025-09-05 18:08:52] [Rank 0] step:7261/10000 train_time:275247ms step_avg:37.91ms +[2025-09-05 18:08:53] [Rank 0] step:7281/10000 train_time:275907ms step_avg:37.89ms +[2025-09-05 18:08:53] [Rank 0] step:7281/10000 train_time:275907ms step_avg:37.89ms +[2025-09-05 18:08:53] [Rank 0] step:7301/10000 train_time:276570ms step_avg:37.88ms +[2025-09-05 18:08:53] [Rank 0] step:7301/10000 train_time:276570ms step_avg:37.88ms +[2025-09-05 18:08:54] [Rank 0] step:7321/10000 train_time:277229ms step_avg:37.87ms +[2025-09-05 18:08:54] [Rank 0] step:7321/10000 train_time:277229ms step_avg:37.87ms +[2025-09-05 18:08:55] [Rank 0] step:7341/10000 train_time:277889ms step_avg:37.85ms +[2025-09-05 18:08:55] [Rank 0] step:7341/10000 train_time:277889ms step_avg:37.85ms +[2025-09-05 18:08:55] [Rank 0] step:7361/10000 train_time:278548ms step_avg:37.84ms +[2025-09-05 18:08:55] [Rank 0] step:7361/10000 train_time:278548ms step_avg:37.84ms +[2025-09-05 18:08:56] [Rank 0] step:7381/10000 train_time:279208ms step_avg:37.83ms +[2025-09-05 18:08:56] [Rank 0] step:7381/10000 train_time:279208ms step_avg:37.83ms +[2025-09-05 18:08:57] [Rank 0] step:7401/10000 train_time:279868ms step_avg:37.81ms +[2025-09-05 18:08:57] [Rank 0] step:7401/10000 train_time:279868ms step_avg:37.81ms +[2025-09-05 18:08:57] [Rank 0] step:7421/10000 train_time:280527ms step_avg:37.80ms +[2025-09-05 18:08:57] [Rank 0] step:7421/10000 train_time:280527ms step_avg:37.80ms +[2025-09-05 18:08:58] [Rank 0] step:7441/10000 train_time:281187ms step_avg:37.79ms +[2025-09-05 18:08:58] [Rank 0] step:7441/10000 train_time:281187ms step_avg:37.79ms +[2025-09-05 18:08:59] [Rank 0] step:7461/10000 train_time:281848ms step_avg:37.78ms +[2025-09-05 18:08:59] [Rank 0] step:7461/10000 train_time:281848ms step_avg:37.78ms +[2025-09-05 18:08:59] [Rank 0] step:7481/10000 train_time:282507ms step_avg:37.76ms +[2025-09-05 18:08:59] [Rank 0] step:7481/10000 train_time:282507ms step_avg:37.76ms +[2025-09-05 18:09:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
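The divisibility warning repeated throughout this log is plain arithmetic: 491520 / 65536 = 7.5, so an evaluation loop that consumes only whole batches runs 7 full batches and drops the remainder. A minimal sketch of the check (the whole-batch loop structure is an assumption; the evaluation code itself is not part of this diff):

    # Sketch: tokens a whole-batch validation loop would skip.
    # val_tokens and val_batch_size are the values from the warning above.
    val_tokens = 491520
    val_batch_size = 65536
    full_batches, leftover = divmod(val_tokens, val_batch_size)
    print(full_batches)                   # 7 full batches
    print(full_batches * val_batch_size)  # 458752 tokens actually evaluated
    print(leftover)                       # 32768 tokens potentially missed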
+[2025-09-05 18:09:01] [Rank 0] PRINT: step:7500/10000 train_loss:0.6680 val_loss:0.6579 train_time:283404ms step_avg:37.79ms
+[2025-09-05 18:09:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:09:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:10:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:10:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:10:23] [Rank 0] Total Loss: 5.2120
+[2025-09-05 18:10:23] [Rank 0] Total FTA (Unweighted): 0.8531
+[2025-09-05 18:10:23] [Rank 0] Total FTA (Weighted): 0.8531
+[2025-09-05 18:10:23] [Rank 0] Group 0 Loss: 5.3444
+[2025-09-05 18:10:23] [Rank 0] Group 1 Loss: 4.8763
+[2025-09-05 18:10:23] [Rank 0] Group 2 Loss: 4.6562
+[2025-09-05 18:10:23] [Rank 0] Group 3 Loss: 5.1660
+[2025-09-05 18:10:23] [Rank 0] Group 4 Loss: 5.1210
+[2025-09-05 18:10:23] [Rank 0] Group 5 Loss: 5.1221
+[2025-09-05 18:10:23] [Rank 0] Group 6 Loss: 5.0259
+[2025-09-05 18:10:23] [Rank 0] Group 7 Loss: 5.1436
+[2025-09-05 18:10:23] [Rank 0] Group 8 Loss: 5.2518
+[2025-09-05 18:10:23] [Rank 0] Group 9 Loss: 5.2018
+[2025-09-05 18:10:23] [Rank 0] Group 10 Loss: 5.3250
+[2025-09-05 18:10:23] [Rank 0] Group 11 Loss: 5.3009
+[2025-09-05 18:10:23] [Rank 0] Group 12 Loss: 5.3801
+[2025-09-05 18:10:23] [Rank 0] Group 13 Loss: 5.4349
+[2025-09-05 18:10:23] [Rank 0] Group 14 Loss: 5.4544
+[2025-09-05 18:10:23] [Rank 0] Group 15 Loss: 5.5878
+[2025-09-05 18:10:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:10:23] [Rank 0] Group 9 FTA: 0.9700
+[2025-09-05 18:10:23] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 18:10:23] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 18:10:23] [Rank 0] Group 12 FTA: 0.8900
+[2025-09-05 18:10:23] [Rank 0] Group 13 FTA: 0.5200
+[2025-09-05 18:10:23] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 18:10:23] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 18:10:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:10:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:10:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:10:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:10:25] [Rank 0] step:7501/10000 train_time:283412ms step_avg:37.78ms
+[2025-09-05 18:10:26] [Rank 0] step:7521/10000 train_time:283954ms step_avg:37.75ms
+[2025-09-05 18:10:26] [Rank 0] step:7541/10000 train_time:284613ms step_avg:37.74ms
+[2025-09-05 18:10:27] [Rank 0] step:7561/10000 train_time:285271ms step_avg:37.73ms
+[2025-09-05 18:10:28] [Rank 0] step:7581/10000 train_time:285930ms step_avg:37.72ms
+[2025-09-05 18:10:28] [Rank 0] step:7601/10000 train_time:286589ms step_avg:37.70ms
+[2025-09-05 18:10:29] [Rank 0] step:7621/10000 train_time:287249ms step_avg:37.69ms
+[2025-09-05 18:10:30] [Rank 0] step:7641/10000 train_time:288574ms step_avg:37.77ms
+[2025-09-05 18:10:31] [Rank 0] step:7661/10000 train_time:289045ms step_avg:37.73ms
+[2025-09-05 18:10:31] [Rank 0] step:7681/10000 train_time:289705ms step_avg:37.72ms
+[2025-09-05 18:10:32] [Rank 0] step:7701/10000 train_time:290364ms step_avg:37.70ms
+[2025-09-05 18:10:33] [Rank 0] step:7721/10000 train_time:291022ms step_avg:37.69ms
+[2025-09-05 18:10:33] [Rank 0] step:7741/10000 train_time:291680ms step_avg:37.68ms
+[2025-09-05 18:10:34] [Rank 0] step:7761/10000 train_time:292339ms step_avg:37.67ms
+[2025-09-05 18:10:35] [Rank 0] step:7781/10000 train_time:292998ms step_avg:37.66ms
+[2025-09-05 18:10:35] [Rank 0] step:7801/10000 train_time:293657ms step_avg:37.64ms
+[2025-09-05 18:10:36] [Rank 0] step:7821/10000 train_time:294315ms step_avg:37.63ms
+[2025-09-05 18:10:37] [Rank 0] step:7841/10000 train_time:294974ms step_avg:37.62ms
+[2025-09-05 18:10:37] [Rank 0] step:7861/10000 train_time:295631ms step_avg:37.61ms
+[2025-09-05 18:10:38] [Rank 0] step:7881/10000 train_time:296290ms step_avg:37.60ms
+[2025-09-05 18:10:39] [Rank 0] step:7901/10000 train_time:296950ms step_avg:37.58ms
+[2025-09-05 18:10:39] [Rank 0] step:7921/10000 train_time:297610ms step_avg:37.57ms
+[2025-09-05 18:10:40] [Rank 0] step:7941/10000 train_time:298270ms step_avg:37.56ms
+[2025-09-05 18:10:41] [Rank 0] step:7961/10000 train_time:298928ms step_avg:37.55ms
+[2025-09-05 18:10:41] [Rank 0] step:7981/10000 train_time:299587ms step_avg:37.54ms
+[2025-09-05 18:10:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
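The step_avg column is consistent with the cumulative train_time divided by the step index; this is inferred from the numbers themselves, not from code in this diff. A quick check against three entries above:

    # Sketch: reproduce step_avg from (step, cumulative train_time) pairs
    # transcribed from the log lines above.
    for step, train_time_ms in [(7621, 287249), (7801, 293657), (7981, 299587)]:
        print(f"step:{step} step_avg:{train_time_ms / step:.2f}ms")
    # prints 37.69, 37.64 and 37.54, matching the logged values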
+[2025-09-05 18:10:42] [Rank 0] PRINT: step:8000/10000 train_loss:0.6623 val_loss:0.6520 train_time:300480ms step_avg:37.56ms
+[2025-09-05 18:10:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:10:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:12:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:12:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:12:05] [Rank 0] Total Loss: 5.2382
+[2025-09-05 18:12:05] [Rank 0] Total FTA (Unweighted): 0.8663
+[2025-09-05 18:12:05] [Rank 0] Total FTA (Weighted): 0.8662
+[2025-09-05 18:12:05] [Rank 0] Group 0 Loss: 5.3526
+[2025-09-05 18:12:05] [Rank 0] Group 1 Loss: 4.9376
+[2025-09-05 18:12:05] [Rank 0] Group 2 Loss: 4.6420
+[2025-09-05 18:12:05] [Rank 0] Group 3 Loss: 5.1781
+[2025-09-05 18:12:05] [Rank 0] Group 4 Loss: 5.1503
+[2025-09-05 18:12:05] [Rank 0] Group 5 Loss: 5.1192
+[2025-09-05 18:12:05] [Rank 0] Group 6 Loss: 5.0575
+[2025-09-05 18:12:05] [Rank 0] Group 7 Loss: 5.1563
+[2025-09-05 18:12:05] [Rank 0] Group 8 Loss: 5.2925
+[2025-09-05 18:12:05] [Rank 0] Group 9 Loss: 5.2378
+[2025-09-05 18:12:05] [Rank 0] Group 10 Loss: 5.3330
+[2025-09-05 18:12:05] [Rank 0] Group 11 Loss: 5.3230
+[2025-09-05 18:12:05] [Rank 0] Group 12 Loss: 5.4117
+[2025-09-05 18:12:05] [Rank 0] Group 13 Loss: 5.4754
+[2025-09-05 18:12:05] [Rank 0] Group 14 Loss: 5.5058
+[2025-09-05 18:12:05] [Rank 0] Group 15 Loss: 5.6390
+[2025-09-05 18:12:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:12:05] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:12:05] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:12:05] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:12:05] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:12:05] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:12:05] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:12:05] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:12:05] [Rank 0] Group 8 FTA: 0.9900
+[2025-09-05 18:12:05] [Rank 0] Group 9 FTA: 0.9700
+[2025-09-05 18:12:05] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 18:12:05] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 18:12:05] [Rank 0] Group 12 FTA: 0.9500
+[2025-09-05 18:12:05] [Rank 0] Group 13 FTA: 0.6500
+[2025-09-05 18:12:05] [Rank 0] Group 14 FTA: 0.2300
+[2025-09-05 18:12:05] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 18:12:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:12:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:12:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:12:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:12:07] [Rank 0] step:8001/10000 train_time:300488ms step_avg:37.56ms
+[2025-09-05 18:12:08] [Rank 0] step:8021/10000 train_time:301532ms step_avg:37.59ms
+[2025-09-05 18:12:08] [Rank 0] step:8041/10000 train_time:302057ms step_avg:37.56ms
+[2025-09-05 18:12:09] [Rank 0] step:8061/10000 train_time:302717ms step_avg:37.55ms
+[2025-09-05 18:12:10] [Rank 0] step:8081/10000 train_time:303376ms step_avg:37.54ms
+[2025-09-05 18:12:10] [Rank 0] step:8101/10000 train_time:304036ms step_avg:37.53ms
+[2025-09-05 18:12:11] [Rank 0] step:8121/10000 train_time:304695ms step_avg:37.52ms
+[2025-09-05 18:12:12] [Rank 0] step:8141/10000 train_time:305355ms step_avg:37.51ms
+[2025-09-05 18:12:12] [Rank 0] step:8161/10000 train_time:306014ms step_avg:37.50ms
+[2025-09-05 18:12:13] [Rank 0] step:8181/10000 train_time:306673ms step_avg:37.49ms
+[2025-09-05 18:12:14] [Rank 0] step:8201/10000 train_time:307333ms step_avg:37.48ms
+[2025-09-05 18:12:14] [Rank 0] step:8221/10000 train_time:307993ms step_avg:37.46ms
+[2025-09-05 18:12:15] [Rank 0] step:8241/10000 train_time:308652ms step_avg:37.45ms
+[2025-09-05 18:12:16] [Rank 0] step:8261/10000 train_time:309311ms step_avg:37.44ms
+[2025-09-05 18:12:16] [Rank 0] step:8281/10000 train_time:309972ms step_avg:37.43ms
+[2025-09-05 18:12:17] [Rank 0] step:8301/10000 train_time:310632ms step_avg:37.42ms
+[2025-09-05 18:12:18] [Rank 0] step:8321/10000 train_time:311295ms step_avg:37.41ms
+[2025-09-05 18:12:18] [Rank 0] step:8341/10000 train_time:311955ms step_avg:37.40ms
+[2025-09-05 18:12:19] [Rank 0] step:8361/10000 train_time:312615ms step_avg:37.39ms
+[2025-09-05 18:12:20] [Rank 0] step:8381/10000 train_time:313275ms step_avg:37.38ms
+[2025-09-05 18:12:20] [Rank 0] step:8401/10000 train_time:313934ms step_avg:37.37ms
+[2025-09-05 18:12:21] [Rank 0] step:8421/10000 train_time:314596ms step_avg:37.36ms
+[2025-09-05 18:12:22] [Rank 0] step:8441/10000 train_time:315254ms step_avg:37.35ms
+[2025-09-05 18:12:22] [Rank 0] step:8461/10000 train_time:315914ms step_avg:37.34ms
+[2025-09-05 18:12:23] [Rank 0] step:8481/10000 train_time:316575ms step_avg:37.33ms
+[2025-09-05 18:12:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
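Total FTA (Unweighted) behaves like a plain mean of the 16 per-group FTAs, and the weighted total coincides whenever every group contributes the same number of samples (1600 samples / 16 groups = 100 each, matching per_group_k = 100 in the configs); the averaging rule is an inference from the logged numbers, not from code in this diff. Checking the step-8500 block that follows:

    # Sketch: recompute the step-8500 "Total FTA (Unweighted)" from the
    # per-group values logged below (groups 0-8 are all 1.0000 there).
    group_fta = [1.0] * 9 + [0.98, 0.98, 0.98, 0.96, 0.77, 0.25, 0.13]
    assert len(group_fta) == 16
    print(sum(group_fta) / len(group_fta))  # 0.878125 -> logged as 0.8781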
+[2025-09-05 18:12:24] [Rank 0] PRINT: step:8500/10000 train_loss:0.6569 val_loss:0.6470 train_time:317469ms step_avg:37.35ms
+[2025-09-05 18:12:24] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:12:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:13:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:13:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:13:47] [Rank 0] Total Loss: 5.1420
+[2025-09-05 18:13:47] [Rank 0] Total FTA (Unweighted): 0.8781
+[2025-09-05 18:13:47] [Rank 0] Total FTA (Weighted): 0.8781
+[2025-09-05 18:13:47] [Rank 0] Group 0 Loss: 5.1747
+[2025-09-05 18:13:47] [Rank 0] Group 1 Loss: 4.8473
+[2025-09-05 18:13:47] [Rank 0] Group 2 Loss: 4.6492
+[2025-09-05 18:13:47] [Rank 0] Group 3 Loss: 5.0518
+[2025-09-05 18:13:47] [Rank 0] Group 4 Loss: 5.0676
+[2025-09-05 18:13:47] [Rank 0] Group 5 Loss: 5.0480
+[2025-09-05 18:13:47] [Rank 0] Group 6 Loss: 5.0198
+[2025-09-05 18:13:47] [Rank 0] Group 7 Loss: 5.0827
+[2025-09-05 18:13:47] [Rank 0] Group 8 Loss: 5.1818
+[2025-09-05 18:13:47] [Rank 0] Group 9 Loss: 5.1361
+[2025-09-05 18:13:47] [Rank 0] Group 10 Loss: 5.2646
+[2025-09-05 18:13:47] [Rank 0] Group 11 Loss: 5.2231
+[2025-09-05 18:13:47] [Rank 0] Group 12 Loss: 5.2928
+[2025-09-05 18:13:47] [Rank 0] Group 13 Loss: 5.3227
+[2025-09-05 18:13:47] [Rank 0] Group 14 Loss: 5.3867
+[2025-09-05 18:13:47] [Rank 0] Group 15 Loss: 5.5228
+[2025-09-05 18:13:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:13:47] [Rank 0] Group 9 FTA: 0.9800
+[2025-09-05 18:13:47] [Rank 0] Group 10 FTA: 0.9800
+[2025-09-05 18:13:47] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 18:13:47] [Rank 0] Group 12 FTA: 0.9600
+[2025-09-05 18:13:47] [Rank 0] Group 13 FTA: 0.7700
+[2025-09-05 18:13:47] [Rank 0] Group 14 FTA: 0.2500
+[2025-09-05 18:13:47] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 18:13:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:13:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:13:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:13:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:13:48] [Rank 0] step:8501/10000 train_time:317479ms step_avg:37.35ms
+[2025-09-05 18:13:49] [Rank 0] step:8521/10000 train_time:317922ms step_avg:37.31ms
+[2025-09-05 18:13:50] [Rank 0] step:8541/10000 train_time:318581ms step_avg:37.30ms
+[2025-09-05 18:13:50] [Rank 0] step:8561/10000 train_time:319240ms step_avg:37.29ms
+[2025-09-05 18:13:51] [Rank 0] step:8581/10000 train_time:319899ms step_avg:37.28ms
+[2025-09-05 18:13:52] [Rank 0] step:8601/10000 train_time:320558ms step_avg:37.27ms
+[2025-09-05 18:13:52] [Rank 0] step:8621/10000 train_time:321217ms step_avg:37.26ms
+[2025-09-05 18:13:53] [Rank 0] step:8641/10000 train_time:321876ms step_avg:37.25ms
+[2025-09-05 18:13:54] [Rank 0] step:8661/10000 train_time:322535ms step_avg:37.24ms
+[2025-09-05 18:13:54] [Rank 0] step:8681/10000 train_time:323195ms step_avg:37.23ms
+[2025-09-05 18:13:55] [Rank 0] step:8701/10000 train_time:323854ms step_avg:37.22ms
+[2025-09-05 18:13:56] [Rank 0] step:8721/10000 train_time:324513ms step_avg:37.21ms
+[2025-09-05 18:13:56] [Rank 0] step:8741/10000 train_time:325172ms step_avg:37.20ms
+[2025-09-05 18:13:57] [Rank 0] step:8761/10000 train_time:325831ms step_avg:37.19ms
+[2025-09-05 18:13:58] [Rank 0] step:8781/10000 train_time:326490ms step_avg:37.18ms
+[2025-09-05 18:13:58] [Rank 0] step:8801/10000 train_time:327152ms step_avg:37.17ms
+[2025-09-05 18:13:59] [Rank 0] step:8821/10000 train_time:327810ms step_avg:37.16ms
+[2025-09-05 18:14:00] [Rank 0] step:8841/10000 train_time:328567ms step_avg:37.16ms
+[2025-09-05 18:14:00] [Rank 0] step:8861/10000 train_time:329225ms step_avg:37.15ms
+[2025-09-05 18:14:01] [Rank 0] step:8881/10000 train_time:329885ms step_avg:37.15ms
+[2025-09-05 18:14:02] [Rank 0] step:8901/10000 train_time:330545ms step_avg:37.14ms
+[2025-09-05 18:14:02] [Rank 0] step:8921/10000 train_time:331204ms step_avg:37.13ms
+[2025-09-05 18:14:03] [Rank 0] step:8941/10000 train_time:331863ms step_avg:37.12ms
+[2025-09-05 18:14:04] [Rank 0] step:8961/10000 train_time:332522ms step_avg:37.11ms
+[2025-09-05 18:14:04] [Rank 0] step:8981/10000 train_time:333180ms step_avg:37.10ms
+[2025-09-05 18:14:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
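Each "Fixed-eval set loaded with 1600 samples" line matches the fixed_eval_indices.json files added in this diff: one list of dataset indices per group key, with per_group_k = 100 indices per group. A hedged consistency check (the path is one of the files added below; the 16-group assumption follows from the Group 0-15 lines above, and the check logic itself is not from the training script):

    import json

    # Sketch: sanity-check a fixed_eval_indices.json file from this diff.
    path = ("logs_qa_sgd_gated/lr_search_long/"
            "mode_9_param_gated_lr_0.05_seed_42/fixed_eval_indices.json")
    with open(path) as f:
        indices = json.load(f)
    assert len(indices) == 16                            # group keys "0".."15"
    assert all(len(v) == 100 for v in indices.values())  # per_group_k = 100
    print(sum(len(v) for v in indices.values()))         # 1600 samples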
+[2025-09-05 18:14:05] [Rank 0] PRINT: step:9000/10000 train_loss:0.6516 val_loss:0.6423 train_time:334072ms step_avg:37.12ms
+[2025-09-05 18:14:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:14:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:15:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:15:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:15:30] [Rank 0] Total Loss: 5.2184
+[2025-09-05 18:15:30] [Rank 0] Total FTA (Unweighted): 0.8831
+[2025-09-05 18:15:30] [Rank 0] Total FTA (Weighted): 0.8831
+[2025-09-05 18:15:30] [Rank 0] Group 0 Loss: 5.4707
+[2025-09-05 18:15:30] [Rank 0] Group 1 Loss: 4.8808
+[2025-09-05 18:15:30] [Rank 0] Group 2 Loss: 4.7649
+[2025-09-05 18:15:30] [Rank 0] Group 3 Loss: 5.2133
+[2025-09-05 18:15:30] [Rank 0] Group 4 Loss: 5.1185
+[2025-09-05 18:15:30] [Rank 0] Group 5 Loss: 5.1008
+[2025-09-05 18:15:30] [Rank 0] Group 6 Loss: 5.0537
+[2025-09-05 18:15:30] [Rank 0] Group 7 Loss: 5.1274
+[2025-09-05 18:15:30] [Rank 0] Group 8 Loss: 5.2215
+[2025-09-05 18:15:30] [Rank 0] Group 9 Loss: 5.2103
+[2025-09-05 18:15:30] [Rank 0] Group 10 Loss: 5.2945
+[2025-09-05 18:15:30] [Rank 0] Group 11 Loss: 5.3092
+[2025-09-05 18:15:30] [Rank 0] Group 12 Loss: 5.3202
+[2025-09-05 18:15:30] [Rank 0] Group 13 Loss: 5.4109
+[2025-09-05 18:15:30] [Rank 0] Group 14 Loss: 5.4169
+[2025-09-05 18:15:30] [Rank 0] Group 15 Loss: 5.5810
+[2025-09-05 18:15:30] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:15:30] [Rank 0] Group 9 FTA: 0.9700
+[2025-09-05 18:15:30] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 18:15:30] [Rank 0] Group 11 FTA: 0.9800
+[2025-09-05 18:15:30] [Rank 0] Group 12 FTA: 0.9500
+[2025-09-05 18:15:30] [Rank 0] Group 13 FTA: 0.8000
+[2025-09-05 18:15:30] [Rank 0] Group 14 FTA: 0.2900
+[2025-09-05 18:15:30] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 18:15:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:15:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:15:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:15:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:15:31] [Rank 0] step:9001/10000 train_time:334082ms step_avg:37.12ms
+[2025-09-05 18:15:32] [Rank 0] step:9021/10000 train_time:334535ms step_avg:37.08ms
+[2025-09-05 18:15:33] [Rank 0] step:9041/10000 train_time:335195ms step_avg:37.08ms
+[2025-09-05 18:15:33] [Rank 0] step:9061/10000 train_time:335855ms step_avg:37.07ms
+[2025-09-05 18:15:34] [Rank 0] step:9081/10000 train_time:336516ms step_avg:37.06ms
+[2025-09-05 18:15:35] [Rank 0] step:9101/10000 train_time:337176ms step_avg:37.05ms
+[2025-09-05 18:15:35] [Rank 0] step:9121/10000 train_time:337835ms step_avg:37.04ms
+[2025-09-05 18:15:36] [Rank 0] step:9141/10000 train_time:338495ms step_avg:37.03ms
+[2025-09-05 18:15:37] [Rank 0] step:9161/10000 train_time:339156ms step_avg:37.02ms
+[2025-09-05 18:15:37] [Rank 0] step:9181/10000 train_time:339815ms step_avg:37.01ms
+[2025-09-05 18:15:38] [Rank 0] step:9201/10000 train_time:340475ms step_avg:37.00ms
+[2025-09-05 18:15:39] [Rank 0] step:9221/10000 train_time:341135ms step_avg:37.00ms
+[2025-09-05 18:15:40] [Rank 0] step:9241/10000 train_time:341794ms step_avg:36.99ms
+[2025-09-05 18:15:40] [Rank 0] step:9261/10000 train_time:342696ms step_avg:37.00ms
+[2025-09-05 18:15:41] [Rank 0] step:9281/10000 train_time:343356ms step_avg:37.00ms
+[2025-09-05 18:15:42] [Rank 0] step:9301/10000 train_time:344016ms step_avg:36.99ms
+[2025-09-05 18:15:42] [Rank 0] step:9321/10000 train_time:344814ms step_avg:36.99ms
+[2025-09-05 18:15:43] [Rank 0] step:9341/10000 train_time:345474ms step_avg:36.98ms
+[2025-09-05 18:15:44] [Rank 0] step:9361/10000 train_time:346134ms step_avg:36.98ms
+[2025-09-05 18:15:44] [Rank 0] step:9381/10000 train_time:346794ms step_avg:36.97ms
+[2025-09-05 18:15:45] [Rank 0] step:9401/10000 train_time:347454ms step_avg:36.96ms
+[2025-09-05 18:15:46] [Rank 0] step:9421/10000 train_time:348115ms step_avg:36.95ms
+[2025-09-05 18:15:46] [Rank 0] step:9441/10000 train_time:348775ms step_avg:36.94ms
+[2025-09-05 18:15:47] [Rank 0] step:9461/10000 train_time:349434ms step_avg:36.93ms
+[2025-09-05 18:15:48] [Rank 0] step:9481/10000 train_time:350093ms step_avg:36.93ms
+[2025-09-05 18:15:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
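Across the evaluations in this file, groups 0 through 8 hold FTA at (or within one sample of) 1.0000, while the hardest groups keep improving late into the cooldown. Transcribing the logged values for groups 13-15 makes the trend explicit (the step 9500 and 10000 blocks appear below):

    # "Group N FTA" values for the three hardest groups, transcribed
    # from the evaluation blocks in this log.
    fta = {
        7000:  {13: 0.52, 14: 0.23, 15: 0.11},
        7500:  {13: 0.52, 14: 0.21, 15: 0.10},
        8000:  {13: 0.65, 14: 0.23, 15: 0.11},
        8500:  {13: 0.77, 14: 0.25, 15: 0.13},
        9000:  {13: 0.80, 14: 0.29, 15: 0.15},
        9500:  {13: 0.82, 14: 0.34, 15: 0.16},
        10000: {13: 0.80, 14: 0.45, 15: 0.18},
    }
    for step, by_group in sorted(fta.items()):
        print(step, by_group[13], by_group[14], by_group[15])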
+[2025-09-05 18:15:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:15:49] [Rank 0] PRINT: step:9500/10000 train_loss:0.6462 val_loss:0.6378 train_time:350988ms step_avg:36.95ms +[2025-09-05 18:15:49] [Rank 0] PRINT: step:9500/10000 train_loss:0.6462 val_loss:0.6378 train_time:350988ms step_avg:36.95ms +[2025-09-05 18:15:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:15:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:15:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:15:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:17:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:17:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:17:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:17:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:17:11] [Rank 0] Total Loss: 5.1677 +[2025-09-05 18:17:11] [Rank 0] Total Loss: 5.1677 +[2025-09-05 18:17:11] [Rank 0] Total FTA (Unweighted): 0.8888 +[2025-09-05 18:17:11] [Rank 0] Total FTA (Unweighted): 0.8888 +[2025-09-05 18:17:11] [Rank 0] Total FTA (Weighted): 0.8888 +[2025-09-05 18:17:11] [Rank 0] Total FTA (Weighted): 0.8888 +[2025-09-05 18:17:11] [Rank 0] Group 0 Loss: 5.3698 +[2025-09-05 18:17:11] [Rank 0] Group 0 Loss: 5.3698 +[2025-09-05 18:17:11] [Rank 0] Group 1 Loss: 4.8561 +[2025-09-05 18:17:11] [Rank 0] Group 1 Loss: 4.8561 +[2025-09-05 18:17:11] [Rank 0] Group 2 Loss: 4.6746 +[2025-09-05 18:17:11] [Rank 0] Group 2 Loss: 4.6746 +[2025-09-05 18:17:11] [Rank 0] Group 3 Loss: 5.0849 +[2025-09-05 18:17:11] [Rank 0] Group 3 Loss: 5.0849 +[2025-09-05 18:17:11] [Rank 0] Group 4 Loss: 5.0514 +[2025-09-05 18:17:11] [Rank 0] Group 4 Loss: 5.0514 +[2025-09-05 18:17:11] [Rank 0] Group 5 Loss: 5.0720 +[2025-09-05 18:17:11] [Rank 0] Group 5 Loss: 5.0720 +[2025-09-05 18:17:11] [Rank 0] Group 6 Loss: 4.9886 +[2025-09-05 18:17:11] [Rank 0] Group 6 Loss: 4.9886 +[2025-09-05 18:17:11] [Rank 0] Group 7 Loss: 5.1266 +[2025-09-05 18:17:11] [Rank 0] Group 7 Loss: 5.1266 +[2025-09-05 18:17:12] [Rank 0] Group 8 Loss: 5.1828 +[2025-09-05 18:17:12] [Rank 0] Group 8 Loss: 5.1828 +[2025-09-05 18:17:12] [Rank 0] Group 9 Loss: 5.1586 +[2025-09-05 18:17:12] [Rank 0] Group 9 Loss: 5.1586 +[2025-09-05 18:17:12] [Rank 0] Group 10 Loss: 5.2994 +[2025-09-05 18:17:12] [Rank 0] Group 10 Loss: 5.2994 +[2025-09-05 18:17:12] [Rank 0] Group 11 Loss: 5.2577 +[2025-09-05 18:17:12] [Rank 0] Group 11 Loss: 5.2577 +[2025-09-05 18:17:12] [Rank 0] Group 12 Loss: 5.2886 +[2025-09-05 18:17:12] [Rank 0] Group 12 Loss: 5.2886 +[2025-09-05 18:17:12] [Rank 0] Group 13 Loss: 5.3534 +[2025-09-05 18:17:12] [Rank 0] Group 13 Loss: 5.3534 +[2025-09-05 18:17:12] [Rank 0] Group 14 Loss: 5.3714 +[2025-09-05 18:17:12] [Rank 0] Group 14 Loss: 5.3714 +[2025-09-05 18:17:12] [Rank 0] Group 15 Loss: 5.5467 +[2025-09-05 18:17:12] [Rank 0] Group 15 Loss: 5.5467 +[2025-09-05 18:17:12] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 4 FTA: 1.0000 
+[2025-09-05 18:17:12] [Rank 0] Group 4 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 5 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 6 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 7 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 8 FTA: 1.0000 +[2025-09-05 18:17:12] [Rank 0] Group 9 FTA: 0.9800 +[2025-09-05 18:17:12] [Rank 0] Group 9 FTA: 0.9800 +[2025-09-05 18:17:12] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 18:17:12] [Rank 0] Group 10 FTA: 0.9700 +[2025-09-05 18:17:12] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-05 18:17:12] [Rank 0] Group 11 FTA: 0.9900 +[2025-09-05 18:17:12] [Rank 0] Group 12 FTA: 0.9600 +[2025-09-05 18:17:12] [Rank 0] Group 12 FTA: 0.9600 +[2025-09-05 18:17:12] [Rank 0] Group 13 FTA: 0.8200 +[2025-09-05 18:17:12] [Rank 0] Group 13 FTA: 0.8200 +[2025-09-05 18:17:12] [Rank 0] Group 14 FTA: 0.3400 +[2025-09-05 18:17:12] [Rank 0] Group 14 FTA: 0.3400 +[2025-09-05 18:17:12] [Rank 0] Group 15 FTA: 0.1600 +[2025-09-05 18:17:12] [Rank 0] Group 15 FTA: 0.1600 +[2025-09-05 18:17:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png +[2025-09-05 18:17:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png +[2025-09-05 18:17:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png +[2025-09-05 18:17:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png +[2025-09-05 18:17:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png +[2025-09-05 18:17:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png +[2025-09-05 18:17:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png +[2025-09-05 18:17:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png +[2025-09-05 18:17:13] [Rank 0] step:9501/10000 train_time:350998ms step_avg:36.94ms +[2025-09-05 18:17:13] [Rank 0] step:9501/10000 train_time:350998ms step_avg:36.94ms +[2025-09-05 18:17:14] [Rank 0] step:9521/10000 train_time:351443ms step_avg:36.91ms +[2025-09-05 18:17:14] [Rank 0] step:9521/10000 train_time:351443ms step_avg:36.91ms +[2025-09-05 18:17:14] [Rank 0] step:9541/10000 train_time:352103ms step_avg:36.90ms +[2025-09-05 18:17:14] [Rank 0] step:9541/10000 train_time:352103ms step_avg:36.90ms +[2025-09-05 18:17:15] [Rank 0] step:9561/10000 train_time:352762ms step_avg:36.90ms +[2025-09-05 18:17:15] [Rank 0] step:9561/10000 train_time:352762ms step_avg:36.90ms +[2025-09-05 18:17:16] [Rank 0] step:9581/10000 train_time:353422ms step_avg:36.89ms +[2025-09-05 18:17:16] [Rank 0] step:9581/10000 train_time:353422ms step_avg:36.89ms +[2025-09-05 18:17:16] [Rank 0] step:9601/10000 train_time:354082ms step_avg:36.88ms +[2025-09-05 18:17:16] 
[Rank 0] step:9601/10000 train_time:354082ms step_avg:36.88ms +[2025-09-05 18:17:17] [Rank 0] step:9621/10000 train_time:354742ms step_avg:36.87ms +[2025-09-05 18:17:17] [Rank 0] step:9621/10000 train_time:354742ms step_avg:36.87ms +[2025-09-05 18:17:18] [Rank 0] step:9641/10000 train_time:355401ms step_avg:36.86ms +[2025-09-05 18:17:18] [Rank 0] step:9641/10000 train_time:355401ms step_avg:36.86ms +[2025-09-05 18:17:19] [Rank 0] step:9661/10000 train_time:356339ms step_avg:36.88ms +[2025-09-05 18:17:19] [Rank 0] step:9661/10000 train_time:356339ms step_avg:36.88ms +[2025-09-05 18:17:19] [Rank 0] step:9681/10000 train_time:356999ms step_avg:36.88ms +[2025-09-05 18:17:19] [Rank 0] step:9681/10000 train_time:356999ms step_avg:36.88ms +[2025-09-05 18:17:20] [Rank 0] step:9701/10000 train_time:357658ms step_avg:36.87ms +[2025-09-05 18:17:20] [Rank 0] step:9701/10000 train_time:357658ms step_avg:36.87ms +[2025-09-05 18:17:21] [Rank 0] step:9721/10000 train_time:358318ms step_avg:36.86ms +[2025-09-05 18:17:21] [Rank 0] step:9721/10000 train_time:358318ms step_avg:36.86ms +[2025-09-05 18:17:21] [Rank 0] step:9741/10000 train_time:358978ms step_avg:36.85ms +[2025-09-05 18:17:21] [Rank 0] step:9741/10000 train_time:358978ms step_avg:36.85ms +[2025-09-05 18:17:22] [Rank 0] step:9761/10000 train_time:359638ms step_avg:36.84ms +[2025-09-05 18:17:22] [Rank 0] step:9761/10000 train_time:359638ms step_avg:36.84ms +[2025-09-05 18:17:23] [Rank 0] step:9781/10000 train_time:360297ms step_avg:36.84ms +[2025-09-05 18:17:23] [Rank 0] step:9781/10000 train_time:360297ms step_avg:36.84ms +[2025-09-05 18:17:23] [Rank 0] step:9801/10000 train_time:360957ms step_avg:36.83ms +[2025-09-05 18:17:23] [Rank 0] step:9801/10000 train_time:360957ms step_avg:36.83ms +[2025-09-05 18:17:24] [Rank 0] step:9821/10000 train_time:361616ms step_avg:36.82ms +[2025-09-05 18:17:24] [Rank 0] step:9821/10000 train_time:361616ms step_avg:36.82ms +[2025-09-05 18:17:25] [Rank 0] step:9841/10000 train_time:362276ms step_avg:36.81ms +[2025-09-05 18:17:25] [Rank 0] step:9841/10000 train_time:362276ms step_avg:36.81ms +[2025-09-05 18:17:25] [Rank 0] step:9861/10000 train_time:362936ms step_avg:36.81ms +[2025-09-05 18:17:25] [Rank 0] step:9861/10000 train_time:362936ms step_avg:36.81ms +[2025-09-05 18:17:26] [Rank 0] step:9881/10000 train_time:363596ms step_avg:36.80ms +[2025-09-05 18:17:26] [Rank 0] step:9881/10000 train_time:363596ms step_avg:36.80ms +[2025-09-05 18:17:26] [Rank 0] step:9901/10000 train_time:364255ms step_avg:36.79ms +[2025-09-05 18:17:26] [Rank 0] step:9901/10000 train_time:364255ms step_avg:36.79ms +[2025-09-05 18:17:27] [Rank 0] step:9921/10000 train_time:364914ms step_avg:36.78ms +[2025-09-05 18:17:27] [Rank 0] step:9921/10000 train_time:364914ms step_avg:36.78ms +[2025-09-05 18:17:28] [Rank 0] step:9941/10000 train_time:365573ms step_avg:36.77ms +[2025-09-05 18:17:28] [Rank 0] step:9941/10000 train_time:365573ms step_avg:36.77ms +[2025-09-05 18:17:28] [Rank 0] step:9961/10000 train_time:366232ms step_avg:36.77ms +[2025-09-05 18:17:28] [Rank 0] step:9961/10000 train_time:366232ms step_avg:36.77ms +[2025-09-05 18:17:29] [Rank 0] step:9981/10000 train_time:366892ms step_avg:36.76ms +[2025-09-05 18:17:29] [Rank 0] step:9981/10000 train_time:366892ms step_avg:36.76ms +[2025-09-05 18:17:30] [Rank 0] step:10000/10000 train_time:367519ms step_avg:36.75ms +[2025-09-05 18:17:30] [Rank 0] step:10000/10000 train_time:367519ms step_avg:36.75ms +[2025-09-05 18:17:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly 
divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:17:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:17:30] [Rank 0] PRINT: step:10000/10000 train_loss:0.6415 val_loss:0.6337 train_time:367793ms step_avg:36.78ms +[2025-09-05 18:17:30] [Rank 0] PRINT: step:10000/10000 train_loss:0.6415 val_loss:0.6337 train_time:367793ms step_avg:36.78ms +[2025-09-05 18:17:30] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:17:30] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:17:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:17:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:18:53] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:18:53] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:18:53] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:18:53] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:18:53] [Rank 0] Total Loss: 5.1670 +[2025-09-05 18:18:53] [Rank 0] Total Loss: 5.1670 +[2025-09-05 18:18:53] [Rank 0] Total FTA (Unweighted): 0.8937 +[2025-09-05 18:18:53] [Rank 0] Total FTA (Unweighted): 0.8937 +[2025-09-05 18:18:53] [Rank 0] Total FTA (Weighted): 0.8938 +[2025-09-05 18:18:53] [Rank 0] Total FTA (Weighted): 0.8938 +[2025-09-05 18:18:53] [Rank 0] Group 0 Loss: 5.3631 +[2025-09-05 18:18:53] [Rank 0] Group 0 Loss: 5.3631 +[2025-09-05 18:18:53] [Rank 0] Group 1 Loss: 4.8541 +[2025-09-05 18:18:53] [Rank 0] Group 1 Loss: 4.8541 +[2025-09-05 18:18:53] [Rank 0] Group 2 Loss: 4.6975 +[2025-09-05 18:18:53] [Rank 0] Group 2 Loss: 4.6975 +[2025-09-05 18:18:53] [Rank 0] Group 3 Loss: 5.1123 +[2025-09-05 18:18:53] [Rank 0] Group 3 Loss: 5.1123 +[2025-09-05 18:18:53] [Rank 0] Group 4 Loss: 5.0548 +[2025-09-05 18:18:53] [Rank 0] Group 4 Loss: 5.0548 +[2025-09-05 18:18:53] [Rank 0] Group 5 Loss: 5.0866 +[2025-09-05 18:18:53] [Rank 0] Group 5 Loss: 5.0866 +[2025-09-05 18:18:53] [Rank 0] Group 6 Loss: 5.0040 +[2025-09-05 18:18:53] [Rank 0] Group 6 Loss: 5.0040 +[2025-09-05 18:18:53] [Rank 0] Group 7 Loss: 5.1072 +[2025-09-05 18:18:53] [Rank 0] Group 7 Loss: 5.1072 +[2025-09-05 18:18:53] [Rank 0] Group 8 Loss: 5.1817 +[2025-09-05 18:18:53] [Rank 0] Group 8 Loss: 5.1817 +[2025-09-05 18:18:53] [Rank 0] Group 9 Loss: 5.1777 +[2025-09-05 18:18:53] [Rank 0] Group 9 Loss: 5.1777 +[2025-09-05 18:18:53] [Rank 0] Group 10 Loss: 5.2728 +[2025-09-05 18:18:53] [Rank 0] Group 10 Loss: 5.2728 +[2025-09-05 18:18:53] [Rank 0] Group 11 Loss: 5.2568 +[2025-09-05 18:18:53] [Rank 0] Group 11 Loss: 5.2568 +[2025-09-05 18:18:53] [Rank 0] Group 12 Loss: 5.2851 +[2025-09-05 18:18:53] [Rank 0] Group 12 Loss: 5.2851 +[2025-09-05 18:18:53] [Rank 0] Group 13 Loss: 5.3413 +[2025-09-05 18:18:53] [Rank 0] Group 13 Loss: 5.3413 +[2025-09-05 18:18:53] [Rank 0] Group 14 Loss: 5.3671 +[2025-09-05 18:18:53] [Rank 0] Group 14 Loss: 5.3671 +[2025-09-05 18:18:53] [Rank 0] Group 15 Loss: 5.5094 +[2025-09-05 18:18:53] [Rank 0] Group 15 Loss: 5.5094 +[2025-09-05 18:18:53] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:18:53] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:18:53] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:18:53] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:18:53] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:18:53] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:18:53] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 18:18:53] [Rank 0] Group 3 
+[2025-09-05 18:18:53] [Rank 0] Group 4 FTA: 1.0000
+[2025-09-05 18:18:53] [Rank 0] Group 5 FTA: 1.0000
+[2025-09-05 18:18:53] [Rank 0] Group 6 FTA: 1.0000
+[2025-09-05 18:18:53] [Rank 0] Group 7 FTA: 1.0000
+[2025-09-05 18:18:53] [Rank 0] Group 8 FTA: 1.0000
+[2025-09-05 18:18:53] [Rank 0] Group 9 FTA: 0.9700
+[2025-09-05 18:18:53] [Rank 0] Group 10 FTA: 0.9900
+[2025-09-05 18:18:53] [Rank 0] Group 11 FTA: 0.9600
+[2025-09-05 18:18:53] [Rank 0] Group 12 FTA: 0.9500
+[2025-09-05 18:18:53] [Rank 0] Group 13 FTA: 0.8000
+[2025-09-05 18:18:53] [Rank 0] Group 14 FTA: 0.4500
+[2025-09-05 18:18:53] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-05 18:18:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_loss_curves.png
+[2025-09-05 18:18:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/per_class_acc_curves.png
+[2025-09-05 18:18:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_loss_curve.png
+[2025-09-05 18:18:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_adam_gated/lr_search_long/mode_5_param_gated_lr_0.01_seed_43/total_acc_curve.png
+[2025-09-05 18:18:55] [Rank 0] step:10001/10000 train_time:367803ms step_avg:36.78ms
+[2025-09-05 18:18:55] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 18:18:55 2025 ---
+[2025-09-05 18:18:55] [Rank 0] PRINT: Peak memory allocated: 4373 MiB reserved: 5248 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..254a4e367287d42263bd63050ef55e176e81ee13
--- /dev/null
+++ 
b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.05, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "b46ad90c-6a4d-4d1e-801d-aa55af1c2656", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 
800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 
1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 
1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 
311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..cc49e7298f427e3b9136b4d13066a77b5051b8cf --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0055ad3776d365bdfe31f3ecf2870a5e0c6c79b594610eade1df3b75e0dba1b6 +size 260502 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..64fa129bd5ab359fa9c7cfafd83312c82654bca1 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b23d3f8f6b4f80412dbcbf7a870443204ae2b1a92aef995da6bd632b1d7885d +size 426848 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..397c1d5fad05e9cfcb8fdae0bc92eba17d9ac40e --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e05e7962683d3757bad26b20864fc86917141ce6a071590802c5c38652d2769 +size 88180 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..11c44b884d82971bdcca2925229ab35a564bb391 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe00848aad005157cb9d7679ce1684e395c9dc9d406b11ccfcb68a4ec04fe6f6 +size 101448 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/training_log_b46ad90c-6a4d-4d1e-801d-aa55af1c2656.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/training_log_b46ad90c-6a4d-4d1e-801d-aa55af1c2656.txt new file mode 100644 index 0000000000000000000000000000000000000000..af714c94e6a608924e8e5e24292a531d9ec38a10 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/training_log_b46ad90c-6a4d-4d1e-801d-aa55af1c2656.txt @@ -0,0 +1,5614 @@ +[2025-09-06 02:26:23] [Rank 0] PRINT: --- Script Start: Sat Sep 
6 02:26:23 2025 ---
+[2025-09-06 02:26:23] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.05, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-06 02:26:23] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-06 02:26:23] [Rank 0] PRINT: Using fixed seed: 42
+[2025-09-06 02:26:23] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42
+[2025-09-06 02:26:23] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
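+    # Layout sketch of a data shard, inferred from the reads below (not a
+    # separate spec): a 256-entry int32 header (1024 bytes) where header[0] is
+    # the magic number 20240520, header[1] the format version (1), and
+    # header[2] the token count, followed by the tokens themselves stored as
+    # uint16 values (2 bytes each, hence the `nbytes == 2 * num_tokens` check).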
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # itertools.cycle over the shards, so training can run for multiple epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: pure SGD+momentum on all params (see --sgd_lr); "
+                         "10: Muon(O Attn, MLP); 13: Muon(W_O, W_2 MLP); "
+                         "14: Muon(W_O); 15: Muon(W_V); 16: Muon(QKV)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Write each message exactly once (a second, unconditional write here
+        # would duplicate every line of the log file)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ... (other initial logs)
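+# Worked example (illustrative only, using m=3 instead of the m=15 configured
+# here) of the long-tail split produced by generate_powerlaw_selection_counts
+# defined just below:
+#   group 0: 1 class,   2**(3-0) = 8 samples per class
+#   group 1: 1 class,   2**(3-1) = 4 samples per class
+#   group 2: 2 classes, 2**(3-2) = 2 samples per class
+#   group 3: 4 classes, 2**(3-3) = 1 sample per class
+# Higher group ids hold more classes (2**(group_id-1)) but exponentially fewer
+# samples per class, which is the power-law tail the QA dataset targets.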
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # For FTA sample count
+
+    # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
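+    # Modes 14-16 below narrow Muon down to a single projection family as an
+    # ablation: W_O only (mode 14), W_V only (mode 15), or Q/K/V together
+    # (mode 16), with Adam taking every remaining matrix.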
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
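+    # Sanity-check sketch (illustrative, not part of the original run): the
+    # split built above should be a disjoint partition, i.e. each matrix owned
+    # by exactly one optimizer. Something like
+    #     adam_owned = {id(p) for g in optimizer1.param_groups for p in g["params"]}
+    #     muon_owned = {id(p) for p in flat_unique_muon_params} if optimizer2 else set()
+    #     assert not (adam_owned & muon_owned)
+    # would catch a weight accidentally assigned to both optimizers.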
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr # LR for Muon when a mode assigns it matrices
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on V Attn, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP; Adam on QK, V Attn, W_1 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # optionally add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
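+
+        # The saved fixed_eval_indices.json maps each group id (as a string) to
+        # at most PER_GROUP_K line indices into the QA jsonl, e.g.
+        # {"0": [...], "1": [...], ...}. A minimal sketch of consuming such a
+        # file (commented out; run_detailed_evaluation does this with error
+        # handling):
+        #   needed = set()
+        #   for arr in fixed_idx.values():
+        #       needed.update(arr)
+        #   eval_items = [json.loads(l) for i, l in enumerate(open(QA_JSONL_PATH, encoding="utf-8")) if i in needed]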
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
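+
+                # A minimal sketch of restoring from such a checkpoint, assuming
+                # the same model/optimizer construction has already run
+                # (commented out; illustration only):
+                #   ckpt = torch.load(checkpoint_path, map_location="cuda")
+                #   model_compiled.load_state_dict(ckpt["model"])
+                #   for opt, opt_state in zip(optimizers, ckpt["optimizers"]):
+                #       opt.load_state_dict(opt_state)
+                #   start_step = ckpt["step"] + 1  # resume after the saved step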
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-06 02:26:23] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
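+
+# For reference, the .bin shard layout assumed by _load_data_shard above: a
+# 256-int32 header with header[0] == 20240520 (magic), header[1] == 1 (version)
+# and header[2] == token count, followed by the tokens as uint16 starting at
+# byte offset 256 * 4. A minimal writer sketch for a compatible shard
+# (commented out; illustration only, assumes token ids fit in uint16):
+#   def _write_data_shard(path, token_ids):
+#       header = np.zeros(256, dtype=np.int32)
+#       header[0], header[1], header[2] = 20240520, 1, len(token_ids)
+#       with open(path, "wb") as f:
+#           f.write(header.tobytes())
+#           f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())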
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
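+
+    # The FTA ("first-token accuracy") check in the loop above takes the logits
+    # at the last prompt position and compares the argmax to the first token of
+    # " <answer>". The two totals computed below then differ as in this made-up
+    # example: group A 90/100 correct, group B 10/50 correct gives
+    #   weighted   = (90 + 10) / (100 + 50) = 0.667  (sample-weighted)
+    #   unweighted = (0.90 + 0.20) / 2      = 0.550  (simple mean over groups)
+    # so the unweighted form keeps rare tail groups from being swamped.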
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on V Attn, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP; Adam on QK, V Attn, W_1 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
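+# NOTE (illustrative summary, not executed): the mode dispatch above routes the
+# hidden matrices between Muon and Adam as follows; embeds, lm_head and scalar
+# parameters always go to Adam, except in mode 9 (pure SGD on everything).
+#
+#   mode  0: Muon: QK, VO, MLP      | Adam: (embeds/head/scalars only)
+#   mode  1: Muon: QK               | Adam: VO, MLP
+#   mode  2: Muon: VO               | Adam: QK, MLP
+#   mode  3: Muon: QKVO             | Adam: MLP
+#   mode  4: Muon: MLP              | Adam: QKVO
+#   mode  5: Muon: (none)           | Adam: all matrices
+#   mode  6: Muon: W_2 (MLP)        | Adam: QKVO, W_1 (MLP)
+#   mode  7: Muon: VO, MLP          | Adam: QK
+#   mode  8: Muon: VO, W_2          | Adam: QK, W_1
+#   mode  9: SGD+momentum on all parameters (no Muon, no Adam)
+#   mode 10: Muon: W_O, MLP         | Adam: V, QK
+#   mode 13: Muon: W_O, W_2         | Adam: QK, V, W_1
+#   mode 14: Muon: W_O              | Adam: QK, V, MLP
+#   mode 15: Muon: W_V              | Adam: QK, W_O, MLP
+#   mode 16: Muon: Q, K, V          | Adam: W_O, MLP
+#
+# In the "gated" parameterization below, W_1 additionally includes the c_up
+# projection, so "MLP" there covers c_fc, c_up and c_proj.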
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP; Adam on V, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
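+# NOTE (illustrative sketch, not part of the original run): the flatten/
+# de-duplicate pattern used for the Muon parameter list above is equivalent to
+# the following hypothetical helper. De-duplication keys on id(p) because a
+# parameter may appear in several logical groups (e.g. attn_vo_group and
+# all_attn_matrices) and must be handed to Muon only once:
+#
+#   def flatten_unique(groups):
+#       seen, flat = set(), []
+#       for item in groups:
+#           for p in (item if isinstance(item, list) else [item]):
+#               if p is not None and id(p) not in seen:
+#                   seen.add(id(p))
+#                   flat.append(p)
+#       return flat
+#
+#   # e.g. flat_unique_muon_params = flatten_unique(muon_params_target_list)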
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
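+# NOTE (illustrative sanity check, not executed): with num_iterations=10000 and
+# cooldown_frac=0.8, the multiplier returned by get_lr() stays at 1.0 for the
+# first 2000 steps (x < 0.2) and then decays linearly to 0.1 at step 10000:
+#
+#   get_lr(0)     -> 1.0
+#   get_lr(1999)  -> 1.0
+#   get_lr(6000)  -> 0.55   # w = 0.5 halfway through the cooldown
+#   get_lr(10000) -> 0.1
+#
+# Likewise get_window_size_blocks() grows the attention window linearly with
+# progress, from 128 tokens (1 block of 128) at step 0 up to 1792 tokens
+# (14 blocks, the next multiple of 128 above 1728) at the final step.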
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+
+# Restore the pre-warmup state so the warmup steps do not affect training
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
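+    # NOTE (illustrative, not part of the original run): the mapping returned
+    # above is JSON-serializable, keyed by group id as a string, with at most
+    # per_group_k JSONL line indices per group, e.g.
+    #   {"0": [12, 345, ...], "1": [67, 890, ...], ...}
+    # Only lines parseable as "<question>? Answer: <answer>" are eligible, and
+    # groups with fewer than per_group_k such samples contribute all of them.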
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / step if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
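+        # NOTE (illustrative, hypothetical reload -- not part of the original
+        # run): a checkpoint saved above could be restored with:
+        #   ckpt = torch.load(checkpoint_path, map_location="cuda")
+        #   model_compiled.load_state_dict(ckpt["model"])
+        #   for opt, opt_state in zip(optimizers, ckpt["optimizers"]):
+        #       opt.load_state_dict(opt_state)
+        # ckpt["step"] gives the step it was saved at; ckpt["code"] holds the
+        # `code` object captured at startup.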
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)  # Muon momentum warmup: 0.85 -> 0.95 over the first 300 steps
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Computed per token but not currently included in the step log line below
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-06 02:26:23] [Rank 0] PRINT: Constructing model...
+[2025-09-06 02:26:25] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-06 02:26:25] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-06 02:26:25] [Rank 0] PRINT: Testing model forward function:
+[2025-09-06 02:26:28] [Rank 0] PRINT: Model test - Result type:
+[2025-09-06 02:26:28] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-06 02:26:28] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-06 02:26:28] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-06 02:26:28] [Rank 0] PRINT: Model returns:
+[2025-09-06 02:26:28] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-06 02:26:28] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-06 02:26:28] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.05).
+[2025-09-06 02:26:28] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-06 02:26:28] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-06 02:26:32] [Rank 0] PRINT: Model compilation complete.
+[2025-09-06 02:26:33] [Rank 0] PRINT: Starting warmup...
+[2025-09-06 02:27:10] [Rank 0] PRINT: Warmup complete.
+[2025-09-06 02:27:10] [Rank 0] PRINT: Starting training...
+[2025-09-06 02:27:17] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/fixed_eval_indices.json
+[2025-09-06 02:27:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:27:20] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-06 02:27:52] [Rank 0] step:21/10000 train_time:31832ms step_avg:1515.82ms
+[2025-09-06 02:27:53] [Rank 0] step:41/10000 train_time:32561ms step_avg:794.18ms
+[2025-09-06 02:27:54] [Rank 0] step:61/10000 train_time:33288ms step_avg:545.70ms
+[2025-09-06 02:27:54] [Rank 0] step:81/10000 train_time:34016ms step_avg:419.95ms
+[2025-09-06 02:27:55] [Rank 0] step:101/10000 train_time:34743ms step_avg:343.99ms
+[2025-09-06 02:27:56] [Rank 0] step:121/10000 train_time:35471ms step_avg:293.15ms
+[2025-09-06 02:27:57] [Rank 0] step:141/10000 train_time:36199ms step_avg:256.73ms
+[2025-09-06 02:27:57] [Rank 0] step:161/10000 train_time:36926ms step_avg:229.36ms
+[2025-09-06 02:27:58] [Rank 0] step:181/10000 train_time:37654ms step_avg:208.03ms
+[2025-09-06 02:27:59] [Rank 0] step:201/10000 train_time:38382ms step_avg:190.95ms
+[2025-09-06 02:27:59] [Rank 0] step:221/10000 train_time:39109ms step_avg:176.96ms
+[2025-09-06 02:28:00] [Rank 0] step:241/10000 train_time:39837ms step_avg:165.30ms
+[2025-09-06 02:28:01] [Rank 0] step:261/10000 train_time:40564ms step_avg:155.42ms
+[2025-09-06 02:28:02] [Rank 0] step:281/10000 train_time:41292ms step_avg:146.95ms
+[2025-09-06 02:28:02] [Rank 0] step:301/10000 train_time:42021ms step_avg:139.60ms
+[2025-09-06 02:28:03] [Rank 0] step:321/10000 train_time:42749ms step_avg:133.17ms
+[2025-09-06 02:28:04] [Rank 0] step:341/10000 train_time:43475ms step_avg:127.49ms
+[2025-09-06 02:28:05] [Rank 0] step:361/10000 train_time:44206ms step_avg:122.45ms
+[2025-09-06 02:28:05] [Rank 0] step:381/10000 train_time:44933ms step_avg:117.93ms
+[2025-09-06 02:28:06] [Rank 0] step:401/10000 train_time:45661ms step_avg:113.87ms
+[2025-09-06 02:28:07] [Rank 0] step:421/10000 train_time:46389ms step_avg:110.19ms
+[2025-09-06 02:28:07] [Rank 0] step:441/10000 train_time:47117ms step_avg:106.84ms
+[2025-09-06 02:28:08] [Rank 0] step:461/10000 train_time:47844ms step_avg:103.78ms
+[2025-09-06 02:28:09] [Rank 0] step:481/10000 train_time:48572ms step_avg:100.98ms
+[2025-09-06 02:28:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:28:10] [Rank 0] PRINT: step:500/10000 train_loss:6.7941 val_loss:4.9833 train_time:49378ms step_avg:98.76ms
+[2025-09-06 02:28:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:28:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:29:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:29:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:29:32] [Rank 0]   Total Loss: 6.6053
+[2025-09-06 02:29:32] [Rank 0]   Total FTA (Unweighted): 0.0281
+[2025-09-06 02:29:32] [Rank 0]   Total FTA (Weighted): 0.0281
+[2025-09-06 02:29:32] [Rank 0]   Group 0 Loss: 4.4409
+[2025-09-06 02:29:32] [Rank 0]   Group 1 Loss: 5.2577
+[2025-09-06 02:29:32] [Rank 0]   Group 2 Loss: 5.8969
+[2025-09-06 02:29:32] [Rank 0]   Group 3 Loss: 6.4121
+[2025-09-06 02:29:32] [Rank 0]   Group 4 Loss: 6.8085
+[2025-09-06 02:29:32] [Rank 0]   Group 5 Loss: 6.8883
+[2025-09-06 02:29:32] [Rank 0]   Group 6 Loss: 6.9385
+[2025-09-06 02:29:32] [Rank 0]   Group 7 Loss: 6.8614
+[2025-09-06 02:29:32] [Rank 0]   Group 8 Loss: 6.9921
+[2025-09-06 02:29:32] [Rank 0]   Group 9 Loss: 7.0782
+[2025-09-06 02:29:32] [Rank 0]   Group 10 Loss: 7.0607
+[2025-09-06 02:29:32] [Rank 0]   Group 11 Loss: 7.1071
+[2025-09-06 02:29:32] [Rank 0]   Group 12 Loss: 6.9366
+[2025-09-06 02:29:32] [Rank 0]   Group 13 Loss: 6.9612
+[2025-09-06 02:29:32] [Rank 0]   Group 14 Loss: 7.0601
+[2025-09-06 02:29:32] [Rank 0]   Group 15 Loss: 6.9852
+[2025-09-06 02:29:32] [Rank 0]   Group 0 FTA: 0.0000
+[2025-09-06 02:29:32] [Rank 0]   Group 1 FTA: 0.2000
+[2025-09-06 02:29:32] [Rank 0]   Group 2 FTA: 0.0000
+[2025-09-06 02:29:32] [Rank 0]   Group 3 FTA: 0.0000
+[2025-09-06 02:29:32] [Rank 0]   Group 4 FTA: 0.0100
+[2025-09-06 02:29:32] [Rank 0]   Group 5 FTA: 0.0000
+[2025-09-06 02:29:32] [Rank 0]   Group 6 FTA: 0.0200
+[2025-09-06 02:29:32] [Rank 0]   Group 7 FTA: 0.0300
+[2025-09-06 02:29:32] [Rank 0]   Group 8 FTA: 0.0400
+[2025-09-06 02:29:32] [Rank 0]   Group 9 FTA: 0.0200
+[2025-09-06 02:29:32] [Rank 0]   Group 10 FTA: 0.0200
+[2025-09-06 02:29:32] [Rank 0]   Group 11 FTA: 0.0300
+[2025-09-06 02:29:32] [Rank 0]   Group 12 FTA: 0.0100
+[2025-09-06 02:29:32] [Rank 0]   Group 13 FTA: 0.0200
+[2025-09-06 02:29:32] [Rank 0]   Group 14 FTA: 0.0100
+[2025-09-06 02:29:32] [Rank 0]   Group 15 FTA: 0.0400
+[2025-09-06 02:29:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 02:29:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 02:29:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 02:29:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 02:29:34] [Rank 0] step:501/10000 train_time:49388ms step_avg:98.58ms
+[2025-09-06 02:29:34] [Rank 0] step:521/10000 train_time:50041ms step_avg:96.05ms
+[2025-09-06 02:29:35] [Rank 0] step:541/10000 train_time:50767ms step_avg:93.84ms
+[2025-09-06 02:29:36] [Rank 0] step:561/10000 train_time:51634ms step_avg:92.04ms
+[2025-09-06 02:29:37] [Rank 0] step:581/10000 train_time:52361ms step_avg:90.12ms
+[2025-09-06 02:29:38] [Rank 0] step:601/10000 train_time:53089ms step_avg:88.33ms
+[2025-09-06 02:29:38] [Rank 0] step:621/10000 train_time:53972ms step_avg:86.91ms
+[2025-09-06 02:29:39] [Rank 0] step:641/10000 train_time:54699ms step_avg:85.33ms
+[2025-09-06 02:29:40] [Rank 0] step:661/10000 train_time:55426ms step_avg:83.85ms
+[2025-09-06 02:29:41] [Rank 0] step:681/10000 train_time:56154ms step_avg:82.46ms
+[2025-09-06 02:29:41] [Rank 0] step:701/10000 train_time:56882ms step_avg:81.14ms
+[2025-09-06 02:29:42] [Rank 0] step:721/10000 train_time:57609ms step_avg:79.90ms
+[2025-09-06 02:29:43] [Rank 0] step:741/10000 train_time:58337ms step_avg:78.73ms
+[2025-09-06 02:29:43] [Rank 0] step:761/10000 train_time:59068ms step_avg:77.62ms
+[2025-09-06 02:29:44] [Rank 0] step:781/10000 train_time:59800ms step_avg:76.57ms
+[2025-09-06 02:29:45] [Rank 0] step:801/10000 train_time:60532ms step_avg:75.57ms
+[2025-09-06 02:29:46] [Rank 0] step:821/10000 train_time:61894ms step_avg:75.39ms
+[2025-09-06 02:29:47] [Rank 0] step:841/10000 train_time:62627ms step_avg:74.47ms
+[2025-09-06 02:29:48] [Rank 0] step:861/10000 train_time:63360ms step_avg:73.59ms
+[2025-09-06 02:29:49] [Rank 0] step:881/10000 train_time:64092ms step_avg:72.75ms
+[2025-09-06 02:29:49] [Rank 0] step:901/10000 train_time:64824ms step_avg:71.95ms
+[2025-09-06 02:29:50] [Rank 0] step:921/10000 train_time:65555ms step_avg:71.18ms
+[2025-09-06 02:29:51] [Rank 0] step:941/10000 train_time:66286ms step_avg:70.44ms
+[2025-09-06 02:29:51] [Rank 0] step:961/10000 train_time:67018ms step_avg:69.74ms
+[2025-09-06 02:29:52] [Rank 0] step:981/10000 train_time:67750ms step_avg:69.06ms
+[2025-09-06 02:29:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:29:53] [Rank 0] PRINT: step:1000/10000 train_loss:4.4491 val_loss:4.0713 train_time:68563ms step_avg:68.56ms
+[2025-09-06 02:29:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:29:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:31:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:31:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:31:15] [Rank 0]   Total Loss: 6.0027
+[2025-09-06 02:31:15] [Rank 0]   Total FTA (Unweighted): 0.0856
+[2025-09-06 02:31:15] [Rank 0]   Total FTA (Weighted): 0.0856
+[2025-09-06 02:31:15] [Rank 0]   Group 0 Loss: 3.7610
+[2025-09-06 02:31:15] [Rank 0]   Group 1 Loss: 3.8051
+[2025-09-06 02:31:15] [Rank 0]   Group 2 Loss: 4.6078
+[2025-09-06 02:31:15] [Rank 0]   Group 3 Loss: 5.4206
+[2025-09-06 02:31:15] [Rank 0]   Group 4 Loss: 6.1887
+[2025-09-06 02:31:15] [Rank 0]   Group 5 Loss: 6.3347
+[2025-09-06 02:31:15] [Rank 0]   Group 6 Loss: 6.4621
+[2025-09-06 02:31:15] [Rank 0]   Group 7 Loss: 6.4091
+[2025-09-06 02:31:15] [Rank 0]   Group 8 Loss: 6.5582
+[2025-09-06 02:31:15] [Rank 0]   Group 9 Loss: 6.7066
+[2025-09-06 02:31:15] [Rank 0]   Group 10 Loss: 6.6597
+[2025-09-06 02:31:15] [Rank 0]   Group 11 Loss: 6.7379
+[2025-09-06 02:31:15] [Rank 0]   Group 12 Loss: 6.5698
+[2025-09-06 02:31:15] [Rank 0]   Group 13 Loss: 6.5624
+[2025-09-06 02:31:15] [Rank 0]   Group 14 Loss: 6.6701
+[2025-09-06 02:31:15] [Rank 0]   Group 15 Loss: 6.5887
+[2025-09-06 02:31:15] [Rank 0]   Group 0 FTA: 0.0000
+[2025-09-06 02:31:15] [Rank 0]   Group 1 FTA: 0.2000
+[2025-09-06 02:31:15] [Rank 0]   Group 2 FTA: 0.0700
+[2025-09-06 02:31:15] [Rank 0]   Group 3 FTA: 0.0800
+[2025-09-06 02:31:15] [Rank 0]   Group 4 FTA: 0.0300
+[2025-09-06 02:31:15] [Rank 0]   Group 5 FTA: 0.0600
+[2025-09-06 02:31:15] [Rank 0]   Group 6 FTA: 0.0600
+[2025-09-06 02:31:15] [Rank 0]   Group 7 FTA: 0.0800
+[2025-09-06 02:31:15] [Rank 0]   Group 8 FTA: 0.1200
+[2025-09-06 02:31:15] [Rank 0]   Group 9 FTA: 0.0900
+[2025-09-06 02:31:15] [Rank 0]   Group 10 FTA: 0.0700
+[2025-09-06 02:31:15] [Rank 0]   Group 11 FTA: 0.1000
+[2025-09-06 02:31:15] [Rank 0]   Group 12 FTA: 0.0800
+[2025-09-06 02:31:15] [Rank 0]   Group 13 FTA: 0.1000
+[2025-09-06 02:31:15] [Rank 0]   Group 14 FTA: 0.1200
+[2025-09-06 02:31:15] [Rank 0]   Group 15 FTA: 0.1100
+[2025-09-06 02:31:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 02:31:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 02:31:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 02:31:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 02:31:16] [Rank 0] step:1001/10000 train_time:68573ms step_avg:68.50ms
+[2025-09-06 02:31:17] [Rank 0] step:1021/10000 train_time:69232ms step_avg:67.81ms
+[2025-09-06 02:31:18] [Rank 0] step:1041/10000 train_time:69964ms step_avg:67.21ms
+[2025-09-06 02:31:19] [Rank 0] step:1061/10000 train_time:70696ms step_avg:66.63ms
+[2025-09-06 02:31:19] [Rank 0] step:1081/10000 train_time:71427ms step_avg:66.08ms
+[2025-09-06 02:31:20] [Rank 0] step:1101/10000 train_time:72160ms step_avg:65.54ms
+[2025-09-06 02:31:21] [Rank 0] step:1121/10000 train_time:72892ms step_avg:65.02ms
+[2025-09-06 02:31:22] [Rank 0] step:1141/10000 train_time:73624ms step_avg:64.53ms
+[2025-09-06 02:31:22] [Rank 0] step:1161/10000 train_time:74357ms step_avg:64.05ms
+[2025-09-06 02:31:23] [Rank 0] step:1181/10000 train_time:75088ms step_avg:63.58ms
+[2025-09-06 02:31:24] [Rank 0] step:1201/10000 train_time:75820ms step_avg:63.13ms
+[2025-09-06 02:31:24] [Rank 0] step:1221/10000 train_time:76552ms step_avg:62.70ms
+[2025-09-06 02:31:25] [Rank 0] step:1241/10000 train_time:77284ms step_avg:62.28ms
+[2025-09-06 02:31:26] [Rank 0] step:1261/10000 train_time:78016ms step_avg:61.87ms
+[2025-09-06 02:31:27] [Rank 0] step:1281/10000 train_time:78756ms step_avg:61.48ms
+[2025-09-06 02:31:27] [Rank 0] step:1301/10000 train_time:79487ms step_avg:61.10ms
+[2025-09-06 02:31:28] [Rank 0] step:1321/10000 train_time:80219ms step_avg:60.73ms
+[2025-09-06 02:31:29] [Rank 0] step:1341/10000 train_time:80952ms step_avg:60.37ms
+[2025-09-06 02:31:30] [Rank 0] step:1361/10000 train_time:81685ms step_avg:60.02ms
+[2025-09-06 02:31:30] [Rank 0] step:1381/10000 train_time:82416ms step_avg:59.68ms
+[2025-09-06 02:31:31] [Rank 0] step:1401/10000 train_time:83148ms step_avg:59.35ms
+[2025-09-06 02:31:32] [Rank 0] step:1421/10000 train_time:83880ms step_avg:59.03ms
+[2025-09-06 02:31:33] [Rank 0] step:1441/10000 train_time:84612ms step_avg:58.72ms
+[2025-09-06 02:31:33] [Rank 0] step:1461/10000 train_time:85344ms step_avg:58.41ms
+[2025-09-06 02:31:34] [Rank 0] step:1481/10000 train_time:86075ms step_avg:58.12ms
+[2025-09-06 02:31:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:31:35] [Rank 0] PRINT: step:1500/10000 train_loss:3.8350 val_loss:3.6260 train_time:86888ms step_avg:57.93ms
+[2025-09-06 02:31:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:31:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:32:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:32:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:32:56] [Rank 0]   Total Loss: 5.6489
+[2025-09-06 02:32:56] [Rank 0]   Total FTA (Unweighted): 0.0969
+[2025-09-06 02:32:56] [Rank 0]   Total FTA (Weighted): 0.0969
+[2025-09-06 02:32:56] [Rank 0]   Group 0 Loss: 3.3785
+[2025-09-06 02:32:56] [Rank 0]   Group 1 Loss: 3.4244
+[2025-09-06 02:32:56] [Rank 0]   Group 2 Loss: 3.9646
+[2025-09-06 02:32:56] [Rank 0]   Group 3 Loss: 4.7983
+[2025-09-06 02:32:56] [Rank 0]   Group 4 Loss: 5.6965
+[2025-09-06 02:32:56] [Rank 0]   Group 5 Loss: 5.9279
+[2025-09-06 02:32:56] [Rank 0]   Group 6 Loss: 6.1051
+[2025-09-06 02:32:56] [Rank 0]   Group 7 Loss: 6.1154
+[2025-09-06 02:32:56] [Rank 0]   Group 8 Loss: 6.2962
+[2025-09-06 02:32:56] [Rank 0]   Group 9 Loss: 6.4561
+[2025-09-06 02:32:56] [Rank 0]   Group 10 Loss: 6.4028
+[2025-09-06 02:32:56] [Rank 0]   Group 11 Loss: 6.4712
+[2025-09-06 02:32:56] [Rank 0]   Group 12 Loss: 6.3067
+[2025-09-06 02:32:56] [Rank 0]   Group 13 Loss: 6.3099
+[2025-09-06 02:32:56] [Rank 0]   Group 14 Loss: 6.4030
+[2025-09-06 02:32:56] [Rank 0]   Group 15 Loss: 6.3256
+[2025-09-06 02:32:56] [Rank 0]   Group 0 FTA: 0.0000
+[2025-09-06 02:32:56] [Rank 0]   Group 1 FTA: 0.2000
+[2025-09-06 02:32:56] [Rank 0]   Group 2 FTA: 0.1800
+[2025-09-06 02:32:56] [Rank 0]   Group 3 FTA: 0.0800
+[2025-09-06 02:32:56] [Rank 0]   Group 4 FTA: 0.0500
+[2025-09-06 02:32:56] [Rank 0]   Group 5 FTA: 0.0800
+[2025-09-06 02:32:56] [Rank 0]   Group 6 FTA: 0.0600
+[2025-09-06 02:32:56] [Rank 0]   Group 7 FTA: 0.0800
+[2025-09-06 02:32:56] [Rank 0]   Group 8 FTA: 0.1200
+[2025-09-06 02:32:56] [Rank 0]   Group 9 FTA: 0.0900
+[2025-09-06 02:32:56] [Rank 0]   Group 10 FTA: 0.0700
+[2025-09-06 02:32:56] [Rank 0]   Group 11 FTA: 0.1000
+[2025-09-06 02:32:56] [Rank 0]   Group 12 FTA: 0.0800
+[2025-09-06 02:32:56] [Rank 0]   Group 13 FTA: 0.1300
+[2025-09-06 02:32:56] [Rank 0]   Group 14 FTA: 0.1200
+[2025-09-06 02:32:56] [Rank 0]   Group 15 FTA: 0.1100
+[2025-09-06 02:32:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 02:32:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 02:32:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 02:32:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 02:32:58] [Rank 0] step:1501/10000 train_time:86898ms step_avg:57.89ms
+[2025-09-06 02:32:59] [Rank 0] step:1521/10000 train_time:87575ms step_avg:57.58ms
+[2025-09-06 02:32:59] [Rank 0] step:1541/10000 train_time:88306ms step_avg:57.30ms
+[2025-09-06 02:33:00] [Rank 0] step:1561/10000 train_time:89038ms step_avg:57.04ms
+[2025-09-06 02:33:01] [Rank 0] step:1581/10000 train_time:89770ms step_avg:56.78ms
+[2025-09-06 02:33:01] [Rank 0] step:1601/10000 train_time:90506ms step_avg:56.53ms
+[2025-09-06 02:33:02] [Rank 0] step:1621/10000 train_time:91238ms step_avg:56.28ms
+[2025-09-06 02:33:04] [Rank 0] step:1641/10000 train_time:92585ms step_avg:56.42ms
+[2025-09-06 02:33:04] [Rank 0] step:1661/10000 train_time:93317ms step_avg:56.18ms
+[2025-09-06 02:33:05] [Rank 0] step:1681/10000 train_time:94049ms step_avg:55.95ms
+[2025-09-06 02:33:06] [Rank 0] step:1701/10000 train_time:94781ms step_avg:55.72ms
+[2025-09-06 02:33:06] [Rank 0] step:1721/10000 train_time:95512ms step_avg:55.50ms
+[2025-09-06 02:33:07] [Rank 0] step:1741/10000 train_time:96243ms step_avg:55.28ms
+[2025-09-06 02:33:08] [Rank 0] step:1761/10000 train_time:96974ms step_avg:55.07ms
+[2025-09-06 02:33:09] [Rank 0] step:1781/10000 train_time:97705ms step_avg:54.86ms
+[2025-09-06 02:33:09] [Rank 0] step:1801/10000 train_time:98436ms step_avg:54.66ms
+[2025-09-06 02:33:10] [Rank 0] step:1821/10000 train_time:99168ms step_avg:54.46ms
+[2025-09-06 02:33:11] [Rank 0] step:1841/10000 train_time:99900ms step_avg:54.26ms
+[2025-09-06 02:33:12] [Rank 0] step:1861/10000 train_time:100632ms step_avg:54.07ms
+[2025-09-06 02:33:12] [Rank 0] step:1881/10000 train_time:101363ms step_avg:53.89ms
+[2025-09-06 02:33:13] [Rank 0] step:1901/10000 train_time:102094ms step_avg:53.71ms
+[2025-09-06 02:33:14] [Rank 0] step:1921/10000 train_time:102826ms step_avg:53.53ms
+[2025-09-06 02:33:15] [Rank 0] step:1941/10000 train_time:103557ms step_avg:53.35ms
+[2025-09-06 02:33:15] [Rank 0] step:1961/10000 train_time:104289ms step_avg:53.18ms
+[2025-09-06 02:33:16] [Rank 0] step:1981/10000 train_time:105022ms
step_avg:53.01ms +[2025-09-06 02:33:16] [Rank 0] step:1981/10000 train_time:105022ms step_avg:53.01ms +[2025-09-06 02:33:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:33:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:33:17] [Rank 0] PRINT: step:2000/10000 train_loss:3.4971 val_loss:3.3721 train_time:105834ms step_avg:52.92ms +[2025-09-06 02:33:17] [Rank 0] PRINT: step:2000/10000 train_loss:3.4971 val_loss:3.3721 train_time:105834ms step_avg:52.92ms +[2025-09-06 02:33:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:33:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:33:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:33:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:34:39] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:34:39] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:34:39] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:34:39] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:34:39] [Rank 0] Total Loss: 5.4789 +[2025-09-06 02:34:39] [Rank 0] Total Loss: 5.4789 +[2025-09-06 02:34:39] [Rank 0] Total FTA (Unweighted): 0.1244 +[2025-09-06 02:34:39] [Rank 0] Total FTA (Unweighted): 0.1244 +[2025-09-06 02:34:39] [Rank 0] Total FTA (Weighted): 0.1244 +[2025-09-06 02:34:39] [Rank 0] Total FTA (Weighted): 0.1244 +[2025-09-06 02:34:39] [Rank 0] Group 0 Loss: 3.4367 +[2025-09-06 02:34:39] [Rank 0] Group 0 Loss: 3.4367 +[2025-09-06 02:34:39] [Rank 0] Group 1 Loss: 3.3028 +[2025-09-06 02:34:39] [Rank 0] Group 1 Loss: 3.3028 +[2025-09-06 02:34:39] [Rank 0] Group 2 Loss: 3.7352 +[2025-09-06 02:34:39] [Rank 0] Group 2 Loss: 3.7352 +[2025-09-06 02:34:39] [Rank 0] Group 3 Loss: 4.4666 +[2025-09-06 02:34:39] [Rank 0] Group 3 Loss: 4.4666 +[2025-09-06 02:34:39] [Rank 0] Group 4 Loss: 5.4039 +[2025-09-06 02:34:39] [Rank 0] Group 4 Loss: 5.4039 +[2025-09-06 02:34:39] [Rank 0] Group 5 Loss: 5.6917 +[2025-09-06 02:34:39] [Rank 0] Group 5 Loss: 5.6917 +[2025-09-06 02:34:39] [Rank 0] Group 6 Loss: 5.9169 +[2025-09-06 02:34:39] [Rank 0] Group 6 Loss: 5.9169 +[2025-09-06 02:34:39] [Rank 0] Group 7 Loss: 5.9460 +[2025-09-06 02:34:39] [Rank 0] Group 7 Loss: 5.9460 +[2025-09-06 02:34:39] [Rank 0] Group 8 Loss: 6.1329 +[2025-09-06 02:34:39] [Rank 0] Group 8 Loss: 6.1329 +[2025-09-06 02:34:39] [Rank 0] Group 9 Loss: 6.2954 +[2025-09-06 02:34:39] [Rank 0] Group 9 Loss: 6.2954 +[2025-09-06 02:34:39] [Rank 0] Group 10 Loss: 6.2421 +[2025-09-06 02:34:39] [Rank 0] Group 10 Loss: 6.2421 +[2025-09-06 02:34:39] [Rank 0] Group 11 Loss: 6.3319 +[2025-09-06 02:34:39] [Rank 0] Group 11 Loss: 6.3319 +[2025-09-06 02:34:39] [Rank 0] Group 12 Loss: 6.1572 +[2025-09-06 02:34:39] [Rank 0] Group 12 Loss: 6.1572 +[2025-09-06 02:34:39] [Rank 0] Group 13 Loss: 6.1710 +[2025-09-06 02:34:39] [Rank 0] Group 13 Loss: 6.1710 +[2025-09-06 02:34:39] [Rank 0] Group 14 Loss: 6.2571 +[2025-09-06 02:34:39] [Rank 0] Group 14 Loss: 6.2571 +[2025-09-06 02:34:39] [Rank 0] Group 15 Loss: 6.1750 +[2025-09-06 02:34:39] [Rank 0] Group 15 Loss: 6.1750 +[2025-09-06 02:34:39] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 02:34:39] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 02:34:39] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:34:39] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 
02:34:39] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:34:39] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:34:39] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:34:39] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:34:39] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:34:39] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:34:39] [Rank 0] Group 5 FTA: 0.1500 +[2025-09-06 02:34:39] [Rank 0] Group 5 FTA: 0.1500 +[2025-09-06 02:34:39] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 02:34:39] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 02:34:39] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:34:39] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:34:39] [Rank 0] Group 8 FTA: 0.1300 +[2025-09-06 02:34:39] [Rank 0] Group 8 FTA: 0.1300 +[2025-09-06 02:34:39] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-06 02:34:39] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-06 02:34:39] [Rank 0] Group 10 FTA: 0.0800 +[2025-09-06 02:34:39] [Rank 0] Group 10 FTA: 0.0800 +[2025-09-06 02:34:39] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:34:39] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:34:39] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 02:34:39] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 02:34:39] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 02:34:39] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 02:34:39] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 02:34:39] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 02:34:39] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 02:34:39] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 02:34:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:34:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:34:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:34:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:34:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:34:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:34:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:34:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:34:40] [Rank 0] step:2001/10000 train_time:105843ms step_avg:52.90ms +[2025-09-06 02:34:40] [Rank 0] step:2001/10000 train_time:105843ms step_avg:52.90ms +[2025-09-06 02:34:41] [Rank 0] step:2021/10000 train_time:106712ms step_avg:52.80ms +[2025-09-06 02:34:41] [Rank 0] step:2021/10000 train_time:106712ms step_avg:52.80ms +[2025-09-06 02:34:42] [Rank 0] step:2041/10000 train_time:107443ms step_avg:52.64ms +[2025-09-06 02:34:42] [Rank 0] step:2041/10000 train_time:107443ms step_avg:52.64ms +[2025-09-06 02:34:43] [Rank 0] step:2061/10000 train_time:108174ms step_avg:52.49ms +[2025-09-06 02:34:43] [Rank 0] step:2061/10000 train_time:108174ms step_avg:52.49ms +[2025-09-06 02:34:43] [Rank 0] 
step:2081/10000 train_time:108906ms step_avg:52.33ms +[2025-09-06 02:34:43] [Rank 0] step:2081/10000 train_time:108906ms step_avg:52.33ms +[2025-09-06 02:34:44] [Rank 0] step:2101/10000 train_time:109637ms step_avg:52.18ms +[2025-09-06 02:34:44] [Rank 0] step:2101/10000 train_time:109637ms step_avg:52.18ms +[2025-09-06 02:34:45] [Rank 0] step:2121/10000 train_time:110368ms step_avg:52.04ms +[2025-09-06 02:34:45] [Rank 0] step:2121/10000 train_time:110368ms step_avg:52.04ms +[2025-09-06 02:34:45] [Rank 0] step:2141/10000 train_time:111103ms step_avg:51.89ms +[2025-09-06 02:34:45] [Rank 0] step:2141/10000 train_time:111103ms step_avg:51.89ms +[2025-09-06 02:34:46] [Rank 0] step:2161/10000 train_time:111834ms step_avg:51.75ms +[2025-09-06 02:34:46] [Rank 0] step:2161/10000 train_time:111834ms step_avg:51.75ms +[2025-09-06 02:34:47] [Rank 0] step:2181/10000 train_time:112566ms step_avg:51.61ms +[2025-09-06 02:34:47] [Rank 0] step:2181/10000 train_time:112566ms step_avg:51.61ms +[2025-09-06 02:34:48] [Rank 0] step:2201/10000 train_time:113297ms step_avg:51.48ms +[2025-09-06 02:34:48] [Rank 0] step:2201/10000 train_time:113297ms step_avg:51.48ms +[2025-09-06 02:34:48] [Rank 0] step:2221/10000 train_time:114028ms step_avg:51.34ms +[2025-09-06 02:34:48] [Rank 0] step:2221/10000 train_time:114028ms step_avg:51.34ms +[2025-09-06 02:34:49] [Rank 0] step:2241/10000 train_time:114764ms step_avg:51.21ms +[2025-09-06 02:34:49] [Rank 0] step:2241/10000 train_time:114764ms step_avg:51.21ms +[2025-09-06 02:34:50] [Rank 0] step:2261/10000 train_time:115501ms step_avg:51.08ms +[2025-09-06 02:34:50] [Rank 0] step:2261/10000 train_time:115501ms step_avg:51.08ms +[2025-09-06 02:34:51] [Rank 0] step:2281/10000 train_time:116239ms step_avg:50.96ms +[2025-09-06 02:34:51] [Rank 0] step:2281/10000 train_time:116239ms step_avg:50.96ms +[2025-09-06 02:34:51] [Rank 0] step:2301/10000 train_time:117117ms step_avg:50.90ms +[2025-09-06 02:34:51] [Rank 0] step:2301/10000 train_time:117117ms step_avg:50.90ms +[2025-09-06 02:34:52] [Rank 0] step:2321/10000 train_time:117882ms step_avg:50.79ms +[2025-09-06 02:34:52] [Rank 0] step:2321/10000 train_time:117882ms step_avg:50.79ms +[2025-09-06 02:34:53] [Rank 0] step:2341/10000 train_time:118620ms step_avg:50.67ms +[2025-09-06 02:34:53] [Rank 0] step:2341/10000 train_time:118620ms step_avg:50.67ms +[2025-09-06 02:34:54] [Rank 0] step:2361/10000 train_time:119357ms step_avg:50.55ms +[2025-09-06 02:34:54] [Rank 0] step:2361/10000 train_time:119357ms step_avg:50.55ms +[2025-09-06 02:34:55] [Rank 0] step:2381/10000 train_time:120232ms step_avg:50.50ms +[2025-09-06 02:34:55] [Rank 0] step:2381/10000 train_time:120232ms step_avg:50.50ms +[2025-09-06 02:34:55] [Rank 0] step:2401/10000 train_time:120970ms step_avg:50.38ms +[2025-09-06 02:34:55] [Rank 0] step:2401/10000 train_time:120970ms step_avg:50.38ms +[2025-09-06 02:34:56] [Rank 0] step:2421/10000 train_time:121708ms step_avg:50.27ms +[2025-09-06 02:34:56] [Rank 0] step:2421/10000 train_time:121708ms step_avg:50.27ms +[2025-09-06 02:34:57] [Rank 0] step:2441/10000 train_time:122446ms step_avg:50.16ms +[2025-09-06 02:34:57] [Rank 0] step:2441/10000 train_time:122446ms step_avg:50.16ms +[2025-09-06 02:34:58] [Rank 0] step:2461/10000 train_time:123183ms step_avg:50.05ms +[2025-09-06 02:34:58] [Rank 0] step:2461/10000 train_time:123183ms step_avg:50.05ms +[2025-09-06 02:34:58] [Rank 0] step:2481/10000 train_time:123920ms step_avg:49.95ms +[2025-09-06 02:34:58] [Rank 0] step:2481/10000 train_time:123920ms step_avg:49.95ms +[2025-09-06 
02:34:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:34:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:34:59] [Rank 0] PRINT: step:2500/10000 train_loss:3.2814 val_loss:3.1852 train_time:124739ms step_avg:49.90ms +[2025-09-06 02:34:59] [Rank 0] PRINT: step:2500/10000 train_loss:3.2814 val_loss:3.1852 train_time:124739ms step_avg:49.90ms +[2025-09-06 02:34:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:34:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:35:00] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:35:00] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:36:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:36:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:36:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:36:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:36:20] [Rank 0] Total Loss: 5.3702 +[2025-09-06 02:36:20] [Rank 0] Total Loss: 5.3702 +[2025-09-06 02:36:20] [Rank 0] Total FTA (Unweighted): 0.1275 +[2025-09-06 02:36:20] [Rank 0] Total FTA (Unweighted): 0.1275 +[2025-09-06 02:36:20] [Rank 0] Total FTA (Weighted): 0.1275 +[2025-09-06 02:36:20] [Rank 0] Total FTA (Weighted): 0.1275 +[2025-09-06 02:36:20] [Rank 0] Group 0 Loss: 3.3808 +[2025-09-06 02:36:20] [Rank 0] Group 0 Loss: 3.3808 +[2025-09-06 02:36:20] [Rank 0] Group 1 Loss: 3.2770 +[2025-09-06 02:36:20] [Rank 0] Group 1 Loss: 3.2770 +[2025-09-06 02:36:20] [Rank 0] Group 2 Loss: 3.6614 +[2025-09-06 02:36:20] [Rank 0] Group 2 Loss: 3.6614 +[2025-09-06 02:36:20] [Rank 0] Group 3 Loss: 4.2910 +[2025-09-06 02:36:20] [Rank 0] Group 3 Loss: 4.2910 +[2025-09-06 02:36:20] [Rank 0] Group 4 Loss: 5.1883 +[2025-09-06 02:36:20] [Rank 0] Group 4 Loss: 5.1883 +[2025-09-06 02:36:20] [Rank 0] Group 5 Loss: 5.5035 +[2025-09-06 02:36:20] [Rank 0] Group 5 Loss: 5.5035 +[2025-09-06 02:36:20] [Rank 0] Group 6 Loss: 5.7759 +[2025-09-06 02:36:20] [Rank 0] Group 6 Loss: 5.7759 +[2025-09-06 02:36:20] [Rank 0] Group 7 Loss: 5.8305 +[2025-09-06 02:36:20] [Rank 0] Group 7 Loss: 5.8305 +[2025-09-06 02:36:20] [Rank 0] Group 8 Loss: 6.0292 +[2025-09-06 02:36:20] [Rank 0] Group 8 Loss: 6.0292 +[2025-09-06 02:36:20] [Rank 0] Group 9 Loss: 6.2044 +[2025-09-06 02:36:20] [Rank 0] Group 9 Loss: 6.2044 +[2025-09-06 02:36:20] [Rank 0] Group 10 Loss: 6.1463 +[2025-09-06 02:36:20] [Rank 0] Group 10 Loss: 6.1463 +[2025-09-06 02:36:20] [Rank 0] Group 11 Loss: 6.2330 +[2025-09-06 02:36:20] [Rank 0] Group 11 Loss: 6.2330 +[2025-09-06 02:36:21] [Rank 0] Group 12 Loss: 6.0931 +[2025-09-06 02:36:21] [Rank 0] Group 12 Loss: 6.0931 +[2025-09-06 02:36:21] [Rank 0] Group 13 Loss: 6.0758 +[2025-09-06 02:36:21] [Rank 0] Group 13 Loss: 6.0758 +[2025-09-06 02:36:21] [Rank 0] Group 14 Loss: 6.1591 +[2025-09-06 02:36:21] [Rank 0] Group 14 Loss: 6.1591 +[2025-09-06 02:36:21] [Rank 0] Group 15 Loss: 6.0742 +[2025-09-06 02:36:21] [Rank 0] Group 15 Loss: 6.0742 +[2025-09-06 02:36:21] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 02:36:21] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 02:36:21] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:36:21] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:36:21] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:36:21] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:36:21] 
[Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:36:21] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:36:21] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:36:21] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:36:21] [Rank 0] Group 5 FTA: 0.1600 +[2025-09-06 02:36:21] [Rank 0] Group 5 FTA: 0.1600 +[2025-09-06 02:36:21] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 02:36:21] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 02:36:21] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:36:21] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:36:21] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 02:36:21] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 02:36:21] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:36:21] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:36:21] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:36:21] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:36:21] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:36:21] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:36:21] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 02:36:21] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 02:36:21] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-06 02:36:21] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-06 02:36:21] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:36:21] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:36:21] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-06 02:36:21] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-06 02:36:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:36:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:36:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:36:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:36:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:36:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:36:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:36:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:36:22] [Rank 0] step:2501/10000 train_time:124748ms step_avg:49.88ms +[2025-09-06 02:36:22] [Rank 0] step:2501/10000 train_time:124748ms step_avg:49.88ms +[2025-09-06 02:36:23] [Rank 0] step:2521/10000 train_time:125416ms step_avg:49.75ms +[2025-09-06 02:36:23] [Rank 0] step:2521/10000 train_time:125416ms step_avg:49.75ms +[2025-09-06 02:36:23] [Rank 0] step:2541/10000 train_time:126154ms step_avg:49.65ms +[2025-09-06 02:36:23] [Rank 0] step:2541/10000 train_time:126154ms step_avg:49.65ms +[2025-09-06 02:36:24] [Rank 0] step:2561/10000 train_time:126891ms step_avg:49.55ms +[2025-09-06 02:36:24] [Rank 0] step:2561/10000 train_time:126891ms step_avg:49.55ms +[2025-09-06 02:36:25] [Rank 0] step:2581/10000 train_time:127629ms step_avg:49.45ms +[2025-09-06 02:36:25] [Rank 0] step:2581/10000 train_time:127629ms 
step_avg:49.45ms +[2025-09-06 02:36:26] [Rank 0] step:2601/10000 train_time:128366ms step_avg:49.35ms +[2025-09-06 02:36:26] [Rank 0] step:2601/10000 train_time:128366ms step_avg:49.35ms +[2025-09-06 02:36:26] [Rank 0] step:2621/10000 train_time:129103ms step_avg:49.26ms +[2025-09-06 02:36:26] [Rank 0] step:2621/10000 train_time:129103ms step_avg:49.26ms +[2025-09-06 02:36:27] [Rank 0] step:2641/10000 train_time:129841ms step_avg:49.16ms +[2025-09-06 02:36:27] [Rank 0] step:2641/10000 train_time:129841ms step_avg:49.16ms +[2025-09-06 02:36:28] [Rank 0] step:2661/10000 train_time:130578ms step_avg:49.07ms +[2025-09-06 02:36:28] [Rank 0] step:2661/10000 train_time:130578ms step_avg:49.07ms +[2025-09-06 02:36:29] [Rank 0] step:2681/10000 train_time:131316ms step_avg:48.98ms +[2025-09-06 02:36:29] [Rank 0] step:2681/10000 train_time:131316ms step_avg:48.98ms +[2025-09-06 02:36:29] [Rank 0] step:2701/10000 train_time:132054ms step_avg:48.89ms +[2025-09-06 02:36:29] [Rank 0] step:2701/10000 train_time:132054ms step_avg:48.89ms +[2025-09-06 02:36:30] [Rank 0] step:2721/10000 train_time:132792ms step_avg:48.80ms +[2025-09-06 02:36:30] [Rank 0] step:2721/10000 train_time:132792ms step_avg:48.80ms +[2025-09-06 02:36:31] [Rank 0] step:2741/10000 train_time:133530ms step_avg:48.72ms +[2025-09-06 02:36:31] [Rank 0] step:2741/10000 train_time:133530ms step_avg:48.72ms +[2025-09-06 02:36:32] [Rank 0] step:2761/10000 train_time:134268ms step_avg:48.63ms +[2025-09-06 02:36:32] [Rank 0] step:2761/10000 train_time:134268ms step_avg:48.63ms +[2025-09-06 02:36:32] [Rank 0] step:2781/10000 train_time:135006ms step_avg:48.55ms +[2025-09-06 02:36:32] [Rank 0] step:2781/10000 train_time:135006ms step_avg:48.55ms +[2025-09-06 02:36:33] [Rank 0] step:2801/10000 train_time:135743ms step_avg:48.46ms +[2025-09-06 02:36:33] [Rank 0] step:2801/10000 train_time:135743ms step_avg:48.46ms +[2025-09-06 02:36:34] [Rank 0] step:2821/10000 train_time:137102ms step_avg:48.60ms +[2025-09-06 02:36:34] [Rank 0] step:2821/10000 train_time:137102ms step_avg:48.60ms +[2025-09-06 02:36:35] [Rank 0] step:2841/10000 train_time:137839ms step_avg:48.52ms +[2025-09-06 02:36:35] [Rank 0] step:2841/10000 train_time:137839ms step_avg:48.52ms +[2025-09-06 02:36:36] [Rank 0] step:2861/10000 train_time:138577ms step_avg:48.44ms +[2025-09-06 02:36:36] [Rank 0] step:2861/10000 train_time:138577ms step_avg:48.44ms +[2025-09-06 02:36:37] [Rank 0] step:2881/10000 train_time:139315ms step_avg:48.36ms +[2025-09-06 02:36:37] [Rank 0] step:2881/10000 train_time:139315ms step_avg:48.36ms +[2025-09-06 02:36:37] [Rank 0] step:2901/10000 train_time:140052ms step_avg:48.28ms +[2025-09-06 02:36:37] [Rank 0] step:2901/10000 train_time:140052ms step_avg:48.28ms +[2025-09-06 02:36:38] [Rank 0] step:2921/10000 train_time:140789ms step_avg:48.20ms +[2025-09-06 02:36:38] [Rank 0] step:2921/10000 train_time:140789ms step_avg:48.20ms +[2025-09-06 02:36:39] [Rank 0] step:2941/10000 train_time:141526ms step_avg:48.12ms +[2025-09-06 02:36:39] [Rank 0] step:2941/10000 train_time:141526ms step_avg:48.12ms +[2025-09-06 02:36:40] [Rank 0] step:2961/10000 train_time:142264ms step_avg:48.05ms +[2025-09-06 02:36:40] [Rank 0] step:2961/10000 train_time:142264ms step_avg:48.05ms +[2025-09-06 02:36:40] [Rank 0] step:2981/10000 train_time:143002ms step_avg:47.97ms +[2025-09-06 02:36:40] [Rank 0] step:2981/10000 train_time:143002ms step_avg:47.97ms +[2025-09-06 02:36:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). 
Some tokens might be missed. +[2025-09-06 02:36:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:36:42] [Rank 0] PRINT: step:3000/10000 train_loss:3.1236 val_loss:3.0562 train_time:143820ms step_avg:47.94ms +[2025-09-06 02:36:42] [Rank 0] PRINT: step:3000/10000 train_loss:3.1236 val_loss:3.0562 train_time:143820ms step_avg:47.94ms +[2025-09-06 02:36:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:36:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:36:42] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:36:42] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:38:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:38:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:38:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:38:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:38:02] [Rank 0] Total Loss: 5.2859 +[2025-09-06 02:38:02] [Rank 0] Total Loss: 5.2859 +[2025-09-06 02:38:02] [Rank 0] Total FTA (Unweighted): 0.1388 +[2025-09-06 02:38:02] [Rank 0] Total FTA (Unweighted): 0.1388 +[2025-09-06 02:38:02] [Rank 0] Total FTA (Weighted): 0.1388 +[2025-09-06 02:38:02] [Rank 0] Total FTA (Weighted): 0.1388 +[2025-09-06 02:38:02] [Rank 0] Group 0 Loss: 3.3969 +[2025-09-06 02:38:02] [Rank 0] Group 0 Loss: 3.3969 +[2025-09-06 02:38:02] [Rank 0] Group 1 Loss: 3.2267 +[2025-09-06 02:38:02] [Rank 0] Group 1 Loss: 3.2267 +[2025-09-06 02:38:02] [Rank 0] Group 2 Loss: 3.6153 +[2025-09-06 02:38:02] [Rank 0] Group 2 Loss: 3.6153 +[2025-09-06 02:38:02] [Rank 0] Group 3 Loss: 4.1453 +[2025-09-06 02:38:02] [Rank 0] Group 3 Loss: 4.1453 +[2025-09-06 02:38:02] [Rank 0] Group 4 Loss: 5.0153 +[2025-09-06 02:38:02] [Rank 0] Group 4 Loss: 5.0153 +[2025-09-06 02:38:02] [Rank 0] Group 5 Loss: 5.3900 +[2025-09-06 02:38:02] [Rank 0] Group 5 Loss: 5.3900 +[2025-09-06 02:38:02] [Rank 0] Group 6 Loss: 5.6624 +[2025-09-06 02:38:02] [Rank 0] Group 6 Loss: 5.6624 +[2025-09-06 02:38:02] [Rank 0] Group 7 Loss: 5.7295 +[2025-09-06 02:38:02] [Rank 0] Group 7 Loss: 5.7295 +[2025-09-06 02:38:02] [Rank 0] Group 8 Loss: 5.9678 +[2025-09-06 02:38:02] [Rank 0] Group 8 Loss: 5.9678 +[2025-09-06 02:38:02] [Rank 0] Group 9 Loss: 6.1003 +[2025-09-06 02:38:02] [Rank 0] Group 9 Loss: 6.1003 +[2025-09-06 02:38:02] [Rank 0] Group 10 Loss: 6.0719 +[2025-09-06 02:38:02] [Rank 0] Group 10 Loss: 6.0719 +[2025-09-06 02:38:02] [Rank 0] Group 11 Loss: 6.1572 +[2025-09-06 02:38:02] [Rank 0] Group 11 Loss: 6.1572 +[2025-09-06 02:38:02] [Rank 0] Group 12 Loss: 6.0154 +[2025-09-06 02:38:02] [Rank 0] Group 12 Loss: 6.0154 +[2025-09-06 02:38:02] [Rank 0] Group 13 Loss: 6.0016 +[2025-09-06 02:38:02] [Rank 0] Group 13 Loss: 6.0016 +[2025-09-06 02:38:02] [Rank 0] Group 14 Loss: 6.0726 +[2025-09-06 02:38:02] [Rank 0] Group 14 Loss: 6.0726 +[2025-09-06 02:38:02] [Rank 0] Group 15 Loss: 6.0064 +[2025-09-06 02:38:02] [Rank 0] Group 15 Loss: 6.0064 +[2025-09-06 02:38:02] [Rank 0] Group 0 FTA: 0.4000 +[2025-09-06 02:38:02] [Rank 0] Group 0 FTA: 0.4000 +[2025-09-06 02:38:02] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:38:02] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:38:03] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:38:03] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:38:03] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:38:03] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:38:03] 
[Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:38:03] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:38:03] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:38:03] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:38:03] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 02:38:03] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 02:38:03] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:38:03] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:38:03] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-06 02:38:03] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-06 02:38:03] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 02:38:03] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 02:38:03] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:38:03] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:38:03] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:38:03] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:38:03] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:38:03] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:38:03] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 02:38:03] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 02:38:03] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:38:03] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:38:03] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:38:03] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:38:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:38:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:38:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:38:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:38:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:38:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:38:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:38:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:38:04] [Rank 0] step:3001/10000 train_time:143829ms step_avg:47.93ms +[2025-09-06 02:38:04] [Rank 0] step:3001/10000 train_time:143829ms step_avg:47.93ms +[2025-09-06 02:38:05] [Rank 0] step:3021/10000 train_time:144501ms step_avg:47.83ms +[2025-09-06 02:38:05] [Rank 0] step:3021/10000 train_time:144501ms step_avg:47.83ms +[2025-09-06 02:38:05] [Rank 0] step:3041/10000 train_time:145239ms step_avg:47.76ms +[2025-09-06 02:38:05] [Rank 0] step:3041/10000 train_time:145239ms step_avg:47.76ms +[2025-09-06 02:38:06] [Rank 0] step:3061/10000 train_time:145978ms step_avg:47.69ms +[2025-09-06 02:38:06] [Rank 0] step:3061/10000 train_time:145978ms step_avg:47.69ms +[2025-09-06 02:38:07] [Rank 0] step:3081/10000 train_time:146717ms step_avg:47.62ms +[2025-09-06 02:38:07] [Rank 0] step:3081/10000 train_time:146717ms step_avg:47.62ms +[2025-09-06 02:38:08] [Rank 0] step:3101/10000 train_time:147455ms step_avg:47.55ms 
+[2025-09-06 02:38:08] [Rank 0] step:3101/10000 train_time:147455ms step_avg:47.55ms +[2025-09-06 02:38:08] [Rank 0] step:3121/10000 train_time:148193ms step_avg:47.48ms +[2025-09-06 02:38:08] [Rank 0] step:3121/10000 train_time:148193ms step_avg:47.48ms +[2025-09-06 02:38:09] [Rank 0] step:3141/10000 train_time:148931ms step_avg:47.42ms +[2025-09-06 02:38:09] [Rank 0] step:3141/10000 train_time:148931ms step_avg:47.42ms +[2025-09-06 02:38:10] [Rank 0] step:3161/10000 train_time:149669ms step_avg:47.35ms +[2025-09-06 02:38:10] [Rank 0] step:3161/10000 train_time:149669ms step_avg:47.35ms +[2025-09-06 02:38:11] [Rank 0] step:3181/10000 train_time:150409ms step_avg:47.28ms +[2025-09-06 02:38:11] [Rank 0] step:3181/10000 train_time:150409ms step_avg:47.28ms +[2025-09-06 02:38:11] [Rank 0] step:3201/10000 train_time:151148ms step_avg:47.22ms +[2025-09-06 02:38:11] [Rank 0] step:3201/10000 train_time:151148ms step_avg:47.22ms +[2025-09-06 02:38:12] [Rank 0] step:3221/10000 train_time:151886ms step_avg:47.16ms +[2025-09-06 02:38:12] [Rank 0] step:3221/10000 train_time:151886ms step_avg:47.16ms +[2025-09-06 02:38:13] [Rank 0] step:3241/10000 train_time:152625ms step_avg:47.09ms +[2025-09-06 02:38:13] [Rank 0] step:3241/10000 train_time:152625ms step_avg:47.09ms +[2025-09-06 02:38:14] [Rank 0] step:3261/10000 train_time:153363ms step_avg:47.03ms +[2025-09-06 02:38:14] [Rank 0] step:3261/10000 train_time:153363ms step_avg:47.03ms +[2025-09-06 02:38:14] [Rank 0] step:3281/10000 train_time:154102ms step_avg:46.97ms +[2025-09-06 02:38:14] [Rank 0] step:3281/10000 train_time:154102ms step_avg:46.97ms +[2025-09-06 02:38:15] [Rank 0] step:3301/10000 train_time:154840ms step_avg:46.91ms +[2025-09-06 02:38:15] [Rank 0] step:3301/10000 train_time:154840ms step_avg:46.91ms +[2025-09-06 02:38:16] [Rank 0] step:3321/10000 train_time:155579ms step_avg:46.85ms +[2025-09-06 02:38:16] [Rank 0] step:3321/10000 train_time:155579ms step_avg:46.85ms +[2025-09-06 02:38:16] [Rank 0] step:3341/10000 train_time:156317ms step_avg:46.79ms +[2025-09-06 02:38:16] [Rank 0] step:3341/10000 train_time:156317ms step_avg:46.79ms +[2025-09-06 02:38:17] [Rank 0] step:3361/10000 train_time:157056ms step_avg:46.73ms +[2025-09-06 02:38:17] [Rank 0] step:3361/10000 train_time:157056ms step_avg:46.73ms +[2025-09-06 02:38:18] [Rank 0] step:3381/10000 train_time:157795ms step_avg:46.67ms +[2025-09-06 02:38:18] [Rank 0] step:3381/10000 train_time:157795ms step_avg:46.67ms +[2025-09-06 02:38:19] [Rank 0] step:3401/10000 train_time:158534ms step_avg:46.61ms +[2025-09-06 02:38:19] [Rank 0] step:3401/10000 train_time:158534ms step_avg:46.61ms +[2025-09-06 02:38:19] [Rank 0] step:3421/10000 train_time:159272ms step_avg:46.56ms +[2025-09-06 02:38:19] [Rank 0] step:3421/10000 train_time:159272ms step_avg:46.56ms +[2025-09-06 02:38:20] [Rank 0] step:3441/10000 train_time:160011ms step_avg:46.50ms +[2025-09-06 02:38:20] [Rank 0] step:3441/10000 train_time:160011ms step_avg:46.50ms +[2025-09-06 02:38:21] [Rank 0] step:3461/10000 train_time:160749ms step_avg:46.45ms +[2025-09-06 02:38:21] [Rank 0] step:3461/10000 train_time:160749ms step_avg:46.45ms +[2025-09-06 02:38:22] [Rank 0] step:3481/10000 train_time:161488ms step_avg:46.39ms +[2025-09-06 02:38:22] [Rank 0] step:3481/10000 train_time:161488ms step_avg:46.39ms +[2025-09-06 02:38:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 02:38:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:38:23] [Rank 0] PRINT: step:3500/10000 train_loss:3.0086 val_loss:2.9518 train_time:162307ms step_avg:46.37ms +[2025-09-06 02:38:23] [Rank 0] PRINT: step:3500/10000 train_loss:3.0086 val_loss:2.9518 train_time:162307ms step_avg:46.37ms +[2025-09-06 02:38:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:38:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:38:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:38:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:39:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:39:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:39:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:39:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:39:43] [Rank 0] Total Loss: 5.1972 +[2025-09-06 02:39:43] [Rank 0] Total Loss: 5.1972 +[2025-09-06 02:39:43] [Rank 0] Total FTA (Unweighted): 0.1644 +[2025-09-06 02:39:43] [Rank 0] Total FTA (Unweighted): 0.1644 +[2025-09-06 02:39:43] [Rank 0] Total FTA (Weighted): 0.1644 +[2025-09-06 02:39:43] [Rank 0] Total FTA (Weighted): 0.1644 +[2025-09-06 02:39:43] [Rank 0] Group 0 Loss: 3.3748 +[2025-09-06 02:39:43] [Rank 0] Group 0 Loss: 3.3748 +[2025-09-06 02:39:43] [Rank 0] Group 1 Loss: 3.2458 +[2025-09-06 02:39:43] [Rank 0] Group 1 Loss: 3.2458 +[2025-09-06 02:39:43] [Rank 0] Group 2 Loss: 3.5513 +[2025-09-06 02:39:43] [Rank 0] Group 2 Loss: 3.5513 +[2025-09-06 02:39:43] [Rank 0] Group 3 Loss: 4.0598 +[2025-09-06 02:39:43] [Rank 0] Group 3 Loss: 4.0598 +[2025-09-06 02:39:43] [Rank 0] Group 4 Loss: 4.8634 +[2025-09-06 02:39:43] [Rank 0] Group 4 Loss: 4.8634 +[2025-09-06 02:39:43] [Rank 0] Group 5 Loss: 5.2501 +[2025-09-06 02:39:43] [Rank 0] Group 5 Loss: 5.2501 +[2025-09-06 02:39:43] [Rank 0] Group 6 Loss: 5.5397 +[2025-09-06 02:39:43] [Rank 0] Group 6 Loss: 5.5397 +[2025-09-06 02:39:43] [Rank 0] Group 7 Loss: 5.6321 +[2025-09-06 02:39:43] [Rank 0] Group 7 Loss: 5.6321 +[2025-09-06 02:39:43] [Rank 0] Group 8 Loss: 5.8428 +[2025-09-06 02:39:43] [Rank 0] Group 8 Loss: 5.8428 +[2025-09-06 02:39:43] [Rank 0] Group 9 Loss: 6.0010 +[2025-09-06 02:39:43] [Rank 0] Group 9 Loss: 6.0010 +[2025-09-06 02:39:43] [Rank 0] Group 10 Loss: 5.9812 +[2025-09-06 02:39:43] [Rank 0] Group 10 Loss: 5.9812 +[2025-09-06 02:39:43] [Rank 0] Group 11 Loss: 6.0767 +[2025-09-06 02:39:43] [Rank 0] Group 11 Loss: 6.0767 +[2025-09-06 02:39:43] [Rank 0] Group 12 Loss: 5.9217 +[2025-09-06 02:39:43] [Rank 0] Group 12 Loss: 5.9217 +[2025-09-06 02:39:43] [Rank 0] Group 13 Loss: 5.9159 +[2025-09-06 02:39:43] [Rank 0] Group 13 Loss: 5.9159 +[2025-09-06 02:39:43] [Rank 0] Group 14 Loss: 5.9761 +[2025-09-06 02:39:43] [Rank 0] Group 14 Loss: 5.9761 +[2025-09-06 02:39:43] [Rank 0] Group 15 Loss: 5.9228 +[2025-09-06 02:39:43] [Rank 0] Group 15 Loss: 5.9228 +[2025-09-06 02:39:43] [Rank 0] Group 0 FTA: 0.8200 +[2025-09-06 02:39:43] [Rank 0] Group 0 FTA: 0.8200 +[2025-09-06 02:39:43] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:39:43] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:39:43] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:39:43] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:39:43] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:39:43] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:39:43] [Rank 0] Group 4 FTA: 0.0900 
+[2025-09-06 02:39:43] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:39:43] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:39:43] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:39:43] [Rank 0] Group 6 FTA: 0.0800 +[2025-09-06 02:39:43] [Rank 0] Group 6 FTA: 0.0800 +[2025-09-06 02:39:43] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:39:43] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:39:43] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 02:39:43] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 02:39:43] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:39:43] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:39:43] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:39:43] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:39:43] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:39:43] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:39:43] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 02:39:43] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 02:39:43] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 02:39:43] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 02:39:43] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:39:43] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:39:43] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:39:43] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:39:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:39:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:39:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:39:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:39:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:39:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:39:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:39:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:39:45] [Rank 0] step:3501/10000 train_time:162316ms step_avg:46.36ms +[2025-09-06 02:39:45] [Rank 0] step:3501/10000 train_time:162316ms step_avg:46.36ms +[2025-09-06 02:39:46] [Rank 0] step:3521/10000 train_time:163002ms step_avg:46.29ms +[2025-09-06 02:39:46] [Rank 0] step:3521/10000 train_time:163002ms step_avg:46.29ms +[2025-09-06 02:39:46] [Rank 0] step:3541/10000 train_time:163740ms step_avg:46.24ms +[2025-09-06 02:39:46] [Rank 0] step:3541/10000 train_time:163740ms step_avg:46.24ms +[2025-09-06 02:39:47] [Rank 0] step:3561/10000 train_time:164478ms step_avg:46.19ms +[2025-09-06 02:39:47] [Rank 0] step:3561/10000 train_time:164478ms step_avg:46.19ms +[2025-09-06 02:39:48] [Rank 0] step:3581/10000 train_time:165217ms step_avg:46.14ms +[2025-09-06 02:39:48] [Rank 0] step:3581/10000 train_time:165217ms step_avg:46.14ms +[2025-09-06 02:39:49] [Rank 0] step:3601/10000 train_time:165955ms step_avg:46.09ms +[2025-09-06 02:39:49] [Rank 0] 
step:3601/10000 train_time:165955ms step_avg:46.09ms +[2025-09-06 02:39:49] [Rank 0] step:3621/10000 train_time:166693ms step_avg:46.03ms +[2025-09-06 02:39:49] [Rank 0] step:3621/10000 train_time:166693ms step_avg:46.03ms +[2025-09-06 02:39:51] [Rank 0] step:3641/10000 train_time:168056ms step_avg:46.16ms +[2025-09-06 02:39:51] [Rank 0] step:3641/10000 train_time:168056ms step_avg:46.16ms +[2025-09-06 02:39:51] [Rank 0] step:3661/10000 train_time:168794ms step_avg:46.11ms +[2025-09-06 02:39:51] [Rank 0] step:3661/10000 train_time:168794ms step_avg:46.11ms +[2025-09-06 02:39:52] [Rank 0] step:3681/10000 train_time:169533ms step_avg:46.06ms +[2025-09-06 02:39:52] [Rank 0] step:3681/10000 train_time:169533ms step_avg:46.06ms +[2025-09-06 02:39:53] [Rank 0] step:3701/10000 train_time:170272ms step_avg:46.01ms +[2025-09-06 02:39:53] [Rank 0] step:3701/10000 train_time:170272ms step_avg:46.01ms +[2025-09-06 02:39:54] [Rank 0] step:3721/10000 train_time:171010ms step_avg:45.96ms +[2025-09-06 02:39:54] [Rank 0] step:3721/10000 train_time:171010ms step_avg:45.96ms +[2025-09-06 02:39:54] [Rank 0] step:3741/10000 train_time:171749ms step_avg:45.91ms +[2025-09-06 02:39:54] [Rank 0] step:3741/10000 train_time:171749ms step_avg:45.91ms +[2025-09-06 02:39:55] [Rank 0] step:3761/10000 train_time:172487ms step_avg:45.86ms +[2025-09-06 02:39:55] [Rank 0] step:3761/10000 train_time:172487ms step_avg:45.86ms +[2025-09-06 02:39:56] [Rank 0] step:3781/10000 train_time:173225ms step_avg:45.81ms +[2025-09-06 02:39:56] [Rank 0] step:3781/10000 train_time:173225ms step_avg:45.81ms +[2025-09-06 02:39:57] [Rank 0] step:3801/10000 train_time:173963ms step_avg:45.77ms +[2025-09-06 02:39:57] [Rank 0] step:3801/10000 train_time:173963ms step_avg:45.77ms +[2025-09-06 02:39:57] [Rank 0] step:3821/10000 train_time:174703ms step_avg:45.72ms +[2025-09-06 02:39:57] [Rank 0] step:3821/10000 train_time:174703ms step_avg:45.72ms +[2025-09-06 02:39:58] [Rank 0] step:3841/10000 train_time:175441ms step_avg:45.68ms +[2025-09-06 02:39:58] [Rank 0] step:3841/10000 train_time:175441ms step_avg:45.68ms +[2025-09-06 02:39:59] [Rank 0] step:3861/10000 train_time:176185ms step_avg:45.63ms +[2025-09-06 02:39:59] [Rank 0] step:3861/10000 train_time:176185ms step_avg:45.63ms +[2025-09-06 02:40:00] [Rank 0] step:3881/10000 train_time:176924ms step_avg:45.59ms +[2025-09-06 02:40:00] [Rank 0] step:3881/10000 train_time:176924ms step_avg:45.59ms +[2025-09-06 02:40:00] [Rank 0] step:3901/10000 train_time:177661ms step_avg:45.54ms +[2025-09-06 02:40:00] [Rank 0] step:3901/10000 train_time:177661ms step_avg:45.54ms +[2025-09-06 02:40:01] [Rank 0] step:3921/10000 train_time:178400ms step_avg:45.50ms +[2025-09-06 02:40:01] [Rank 0] step:3921/10000 train_time:178400ms step_avg:45.50ms +[2025-09-06 02:40:02] [Rank 0] step:3941/10000 train_time:179139ms step_avg:45.46ms +[2025-09-06 02:40:02] [Rank 0] step:3941/10000 train_time:179139ms step_avg:45.46ms +[2025-09-06 02:40:03] [Rank 0] step:3961/10000 train_time:179887ms step_avg:45.41ms +[2025-09-06 02:40:03] [Rank 0] step:3961/10000 train_time:179887ms step_avg:45.41ms +[2025-09-06 02:40:03] [Rank 0] step:3981/10000 train_time:180626ms step_avg:45.37ms +[2025-09-06 02:40:03] [Rank 0] step:3981/10000 train_time:180626ms step_avg:45.37ms +[2025-09-06 02:40:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:40:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). 
Some tokens might be missed. +[2025-09-06 02:40:04] [Rank 0] PRINT: step:4000/10000 train_loss:2.9172 val_loss:2.8674 train_time:181444ms step_avg:45.36ms +[2025-09-06 02:40:04] [Rank 0] PRINT: step:4000/10000 train_loss:2.9172 val_loss:2.8674 train_time:181444ms step_avg:45.36ms +[2025-09-06 02:40:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:40:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:40:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:40:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:41:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:41:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:41:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:41:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:41:26] [Rank 0] Total Loss: 5.1495 +[2025-09-06 02:41:26] [Rank 0] Total Loss: 5.1495 +[2025-09-06 02:41:26] [Rank 0] Total FTA (Unweighted): 0.1775 +[2025-09-06 02:41:26] [Rank 0] Total FTA (Unweighted): 0.1775 +[2025-09-06 02:41:26] [Rank 0] Total FTA (Weighted): 0.1775 +[2025-09-06 02:41:26] [Rank 0] Total FTA (Weighted): 0.1775 +[2025-09-06 02:41:26] [Rank 0] Group 0 Loss: 3.2953 +[2025-09-06 02:41:26] [Rank 0] Group 0 Loss: 3.2953 +[2025-09-06 02:41:26] [Rank 0] Group 1 Loss: 3.3162 +[2025-09-06 02:41:26] [Rank 0] Group 1 Loss: 3.3162 +[2025-09-06 02:41:26] [Rank 0] Group 2 Loss: 3.5421 +[2025-09-06 02:41:26] [Rank 0] Group 2 Loss: 3.5421 +[2025-09-06 02:41:26] [Rank 0] Group 3 Loss: 4.0031 +[2025-09-06 02:41:26] [Rank 0] Group 3 Loss: 4.0031 +[2025-09-06 02:41:26] [Rank 0] Group 4 Loss: 4.7652 +[2025-09-06 02:41:26] [Rank 0] Group 4 Loss: 4.7652 +[2025-09-06 02:41:26] [Rank 0] Group 5 Loss: 5.1815 +[2025-09-06 02:41:26] [Rank 0] Group 5 Loss: 5.1815 +[2025-09-06 02:41:26] [Rank 0] Group 6 Loss: 5.4802 +[2025-09-06 02:41:26] [Rank 0] Group 6 Loss: 5.4802 +[2025-09-06 02:41:26] [Rank 0] Group 7 Loss: 5.5613 +[2025-09-06 02:41:26] [Rank 0] Group 7 Loss: 5.5613 +[2025-09-06 02:41:26] [Rank 0] Group 8 Loss: 5.7994 +[2025-09-06 02:41:26] [Rank 0] Group 8 Loss: 5.7994 +[2025-09-06 02:41:26] [Rank 0] Group 9 Loss: 5.9537 +[2025-09-06 02:41:26] [Rank 0] Group 9 Loss: 5.9537 +[2025-09-06 02:41:26] [Rank 0] Group 10 Loss: 5.9352 +[2025-09-06 02:41:26] [Rank 0] Group 10 Loss: 5.9352 +[2025-09-06 02:41:26] [Rank 0] Group 11 Loss: 6.0100 +[2025-09-06 02:41:26] [Rank 0] Group 11 Loss: 6.0100 +[2025-09-06 02:41:26] [Rank 0] Group 12 Loss: 5.8760 +[2025-09-06 02:41:26] [Rank 0] Group 12 Loss: 5.8760 +[2025-09-06 02:41:26] [Rank 0] Group 13 Loss: 5.8784 +[2025-09-06 02:41:26] [Rank 0] Group 13 Loss: 5.8784 +[2025-09-06 02:41:26] [Rank 0] Group 14 Loss: 5.9241 +[2025-09-06 02:41:26] [Rank 0] Group 14 Loss: 5.9241 +[2025-09-06 02:41:26] [Rank 0] Group 15 Loss: 5.8696 +[2025-09-06 02:41:26] [Rank 0] Group 15 Loss: 5.8696 +[2025-09-06 02:41:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:41:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:41:26] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:41:26] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:41:26] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:41:26] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:41:26] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:41:26] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:41:26] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 
02:41:26] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:41:26] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:41:26] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:41:26] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:41:26] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:41:26] [Rank 0] Group 8 FTA: 0.1600 +[2025-09-06 02:41:26] [Rank 0] Group 8 FTA: 0.1600 +[2025-09-06 02:41:26] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 02:41:26] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 02:41:26] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-06 02:41:26] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-06 02:41:26] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:41:26] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:41:26] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:41:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:41:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:41:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:41:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:41:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:41:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:41:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:41:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:41:28] [Rank 0] step:4001/10000 train_time:181453ms step_avg:45.35ms +[2025-09-06 02:41:28] [Rank 0] step:4001/10000 train_time:181453ms step_avg:45.35ms +[2025-09-06 02:41:29] [Rank 0] step:4021/10000 train_time:182323ms step_avg:45.34ms +[2025-09-06 02:41:29] [Rank 0] step:4021/10000 train_time:182323ms step_avg:45.34ms +[2025-09-06 02:41:30] [Rank 0] step:4041/10000 train_time:183060ms step_avg:45.30ms +[2025-09-06 02:41:30] [Rank 0] step:4041/10000 train_time:183060ms step_avg:45.30ms +[2025-09-06 02:41:30] [Rank 0] step:4061/10000 train_time:183798ms step_avg:45.26ms +[2025-09-06 02:41:30] [Rank 0] step:4061/10000 train_time:183798ms step_avg:45.26ms +[2025-09-06 02:41:31] [Rank 0] step:4081/10000 train_time:184536ms step_avg:45.22ms +[2025-09-06 02:41:31] [Rank 0] step:4081/10000 train_time:184536ms step_avg:45.22ms +[2025-09-06 02:41:32] [Rank 0] step:4101/10000 train_time:185273ms step_avg:45.18ms +[2025-09-06 02:41:32] [Rank 0] step:4101/10000 train_time:185273ms step_avg:45.18ms +[2025-09-06 02:41:32] [Rank 0] step:4121/10000 train_time:186011ms 
step_avg:45.14ms +[2025-09-06 02:41:32] [Rank 0] step:4121/10000 train_time:186011ms step_avg:45.14ms +[2025-09-06 02:41:33] [Rank 0] step:4141/10000 train_time:186749ms step_avg:45.10ms +[2025-09-06 02:41:33] [Rank 0] step:4141/10000 train_time:186749ms step_avg:45.10ms +[2025-09-06 02:41:34] [Rank 0] step:4161/10000 train_time:187487ms step_avg:45.06ms +[2025-09-06 02:41:34] [Rank 0] step:4161/10000 train_time:187487ms step_avg:45.06ms +[2025-09-06 02:41:35] [Rank 0] step:4181/10000 train_time:188224ms step_avg:45.02ms +[2025-09-06 02:41:35] [Rank 0] step:4181/10000 train_time:188224ms step_avg:45.02ms +[2025-09-06 02:41:35] [Rank 0] step:4201/10000 train_time:188963ms step_avg:44.98ms +[2025-09-06 02:41:35] [Rank 0] step:4201/10000 train_time:188963ms step_avg:44.98ms +[2025-09-06 02:41:36] [Rank 0] step:4221/10000 train_time:189700ms step_avg:44.94ms +[2025-09-06 02:41:36] [Rank 0] step:4221/10000 train_time:189700ms step_avg:44.94ms +[2025-09-06 02:41:37] [Rank 0] step:4241/10000 train_time:190439ms step_avg:44.90ms +[2025-09-06 02:41:37] [Rank 0] step:4241/10000 train_time:190439ms step_avg:44.90ms +[2025-09-06 02:41:38] [Rank 0] step:4261/10000 train_time:191177ms step_avg:44.87ms +[2025-09-06 02:41:38] [Rank 0] step:4261/10000 train_time:191177ms step_avg:44.87ms +[2025-09-06 02:41:38] [Rank 0] step:4281/10000 train_time:191915ms step_avg:44.83ms +[2025-09-06 02:41:38] [Rank 0] step:4281/10000 train_time:191915ms step_avg:44.83ms +[2025-09-06 02:41:39] [Rank 0] step:4301/10000 train_time:192653ms step_avg:44.79ms +[2025-09-06 02:41:39] [Rank 0] step:4301/10000 train_time:192653ms step_avg:44.79ms +[2025-09-06 02:41:40] [Rank 0] step:4321/10000 train_time:193392ms step_avg:44.76ms +[2025-09-06 02:41:40] [Rank 0] step:4321/10000 train_time:193392ms step_avg:44.76ms +[2025-09-06 02:41:41] [Rank 0] step:4341/10000 train_time:194130ms step_avg:44.72ms +[2025-09-06 02:41:41] [Rank 0] step:4341/10000 train_time:194130ms step_avg:44.72ms +[2025-09-06 02:41:41] [Rank 0] step:4361/10000 train_time:194868ms step_avg:44.68ms +[2025-09-06 02:41:41] [Rank 0] step:4361/10000 train_time:194868ms step_avg:44.68ms +[2025-09-06 02:41:42] [Rank 0] step:4381/10000 train_time:195606ms step_avg:44.65ms +[2025-09-06 02:41:42] [Rank 0] step:4381/10000 train_time:195606ms step_avg:44.65ms +[2025-09-06 02:41:43] [Rank 0] step:4401/10000 train_time:196343ms step_avg:44.61ms +[2025-09-06 02:41:43] [Rank 0] step:4401/10000 train_time:196343ms step_avg:44.61ms +[2025-09-06 02:41:44] [Rank 0] step:4421/10000 train_time:197080ms step_avg:44.58ms +[2025-09-06 02:41:44] [Rank 0] step:4421/10000 train_time:197080ms step_avg:44.58ms +[2025-09-06 02:41:44] [Rank 0] step:4441/10000 train_time:197818ms step_avg:44.54ms +[2025-09-06 02:41:44] [Rank 0] step:4441/10000 train_time:197818ms step_avg:44.54ms +[2025-09-06 02:41:45] [Rank 0] step:4461/10000 train_time:198555ms step_avg:44.51ms +[2025-09-06 02:41:45] [Rank 0] step:4461/10000 train_time:198555ms step_avg:44.51ms +[2025-09-06 02:41:46] [Rank 0] step:4481/10000 train_time:199293ms step_avg:44.48ms +[2025-09-06 02:41:46] [Rank 0] step:4481/10000 train_time:199293ms step_avg:44.48ms +[2025-09-06 02:41:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:41:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 02:41:47] [Rank 0] PRINT: step:4500/10000 train_loss:2.8423 val_loss:2.8004 train_time:200112ms step_avg:44.47ms +[2025-09-06 02:41:47] [Rank 0] PRINT: step:4500/10000 train_loss:2.8423 val_loss:2.8004 train_time:200112ms step_avg:44.47ms +[2025-09-06 02:41:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:41:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:41:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:41:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:43:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:43:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:43:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:43:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:43:08] [Rank 0] Total Loss: 5.0898 +[2025-09-06 02:43:08] [Rank 0] Total Loss: 5.0898 +[2025-09-06 02:43:08] [Rank 0] Total FTA (Unweighted): 0.1825 +[2025-09-06 02:43:08] [Rank 0] Total FTA (Unweighted): 0.1825 +[2025-09-06 02:43:08] [Rank 0] Total FTA (Weighted): 0.1825 +[2025-09-06 02:43:08] [Rank 0] Total FTA (Weighted): 0.1825 +[2025-09-06 02:43:08] [Rank 0] Group 0 Loss: 3.2442 +[2025-09-06 02:43:08] [Rank 0] Group 0 Loss: 3.2442 +[2025-09-06 02:43:08] [Rank 0] Group 1 Loss: 3.2280 +[2025-09-06 02:43:08] [Rank 0] Group 1 Loss: 3.2280 +[2025-09-06 02:43:08] [Rank 0] Group 2 Loss: 3.4305 +[2025-09-06 02:43:08] [Rank 0] Group 2 Loss: 3.4305 +[2025-09-06 02:43:08] [Rank 0] Group 3 Loss: 3.9735 +[2025-09-06 02:43:08] [Rank 0] Group 3 Loss: 3.9735 +[2025-09-06 02:43:08] [Rank 0] Group 4 Loss: 4.6892 +[2025-09-06 02:43:08] [Rank 0] Group 4 Loss: 4.6892 +[2025-09-06 02:43:08] [Rank 0] Group 5 Loss: 5.1069 +[2025-09-06 02:43:08] [Rank 0] Group 5 Loss: 5.1069 +[2025-09-06 02:43:08] [Rank 0] Group 6 Loss: 5.4202 +[2025-09-06 02:43:08] [Rank 0] Group 6 Loss: 5.4202 +[2025-09-06 02:43:08] [Rank 0] Group 7 Loss: 5.5241 +[2025-09-06 02:43:08] [Rank 0] Group 7 Loss: 5.5241 +[2025-09-06 02:43:08] [Rank 0] Group 8 Loss: 5.7477 +[2025-09-06 02:43:08] [Rank 0] Group 8 Loss: 5.7477 +[2025-09-06 02:43:08] [Rank 0] Group 9 Loss: 5.9016 +[2025-09-06 02:43:08] [Rank 0] Group 9 Loss: 5.9016 +[2025-09-06 02:43:08] [Rank 0] Group 10 Loss: 5.8789 +[2025-09-06 02:43:08] [Rank 0] Group 10 Loss: 5.8789 +[2025-09-06 02:43:08] [Rank 0] Group 11 Loss: 5.9459 +[2025-09-06 02:43:08] [Rank 0] Group 11 Loss: 5.9459 +[2025-09-06 02:43:08] [Rank 0] Group 12 Loss: 5.8287 +[2025-09-06 02:43:08] [Rank 0] Group 12 Loss: 5.8287 +[2025-09-06 02:43:08] [Rank 0] Group 13 Loss: 5.8178 +[2025-09-06 02:43:08] [Rank 0] Group 13 Loss: 5.8178 +[2025-09-06 02:43:08] [Rank 0] Group 14 Loss: 5.8767 +[2025-09-06 02:43:08] [Rank 0] Group 14 Loss: 5.8767 +[2025-09-06 02:43:08] [Rank 0] Group 15 Loss: 5.8224 +[2025-09-06 02:43:08] [Rank 0] Group 15 Loss: 5.8224 +[2025-09-06 02:43:08] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:43:08] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:43:08] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:43:08] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:43:08] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:43:08] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:43:08] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:43:08] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:43:08] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:43:08] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:43:08] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:43:08] [Rank 0] Group 5 FTA: 
0.1800 +[2025-09-06 02:43:08] [Rank 0] Group 6 FTA: 0.1000 +[2025-09-06 02:43:08] [Rank 0] Group 6 FTA: 0.1000 +[2025-09-06 02:43:08] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:43:08] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:43:08] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 02:43:08] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 02:43:08] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 02:43:08] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 02:43:08] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:43:08] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 02:43:08] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:43:08] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:43:08] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:43:08] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:43:08] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-06 02:43:08] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-06 02:43:08] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 02:43:08] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 02:43:08] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:43:08] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:43:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:43:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:43:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:43:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:43:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:43:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:43:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:43:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:43:10] [Rank 0] step:4501/10000 train_time:200120ms step_avg:44.46ms +[2025-09-06 02:43:10] [Rank 0] step:4501/10000 train_time:200120ms step_avg:44.46ms +[2025-09-06 02:43:11] [Rank 0] step:4521/10000 train_time:200789ms step_avg:44.41ms +[2025-09-06 02:43:11] [Rank 0] step:4521/10000 train_time:200789ms step_avg:44.41ms +[2025-09-06 02:43:11] [Rank 0] step:4541/10000 train_time:201527ms step_avg:44.38ms +[2025-09-06 02:43:11] [Rank 0] step:4541/10000 train_time:201527ms step_avg:44.38ms +[2025-09-06 02:43:12] [Rank 0] step:4561/10000 train_time:202265ms step_avg:44.35ms +[2025-09-06 02:43:12] [Rank 0] step:4561/10000 train_time:202265ms step_avg:44.35ms +[2025-09-06 02:43:13] [Rank 0] step:4581/10000 train_time:203003ms step_avg:44.31ms +[2025-09-06 02:43:13] [Rank 0] step:4581/10000 train_time:203003ms step_avg:44.31ms +[2025-09-06 02:43:14] [Rank 0] step:4601/10000 train_time:203740ms step_avg:44.28ms +[2025-09-06 02:43:14] [Rank 0] step:4601/10000 train_time:203740ms step_avg:44.28ms +[2025-09-06 02:43:14] [Rank 0] step:4621/10000 train_time:204478ms step_avg:44.25ms +[2025-09-06 
02:43:14] [Rank 0] step:4621/10000 train_time:204478ms step_avg:44.25ms +[2025-09-06 02:43:15] [Rank 0] step:4641/10000 train_time:205215ms step_avg:44.22ms +[2025-09-06 02:43:15] [Rank 0] step:4641/10000 train_time:205215ms step_avg:44.22ms +[2025-09-06 02:43:16] [Rank 0] step:4661/10000 train_time:205952ms step_avg:44.19ms +[2025-09-06 02:43:16] [Rank 0] step:4661/10000 train_time:205952ms step_avg:44.19ms +[2025-09-06 02:43:16] [Rank 0] step:4681/10000 train_time:206690ms step_avg:44.16ms +[2025-09-06 02:43:16] [Rank 0] step:4681/10000 train_time:206690ms step_avg:44.16ms +[2025-09-06 02:43:17] [Rank 0] step:4701/10000 train_time:207427ms step_avg:44.12ms +[2025-09-06 02:43:17] [Rank 0] step:4701/10000 train_time:207427ms step_avg:44.12ms +[2025-09-06 02:43:18] [Rank 0] step:4721/10000 train_time:208165ms step_avg:44.09ms +[2025-09-06 02:43:18] [Rank 0] step:4721/10000 train_time:208165ms step_avg:44.09ms +[2025-09-06 02:43:19] [Rank 0] step:4741/10000 train_time:209050ms step_avg:44.09ms +[2025-09-06 02:43:19] [Rank 0] step:4741/10000 train_time:209050ms step_avg:44.09ms +[2025-09-06 02:43:20] [Rank 0] step:4761/10000 train_time:209787ms step_avg:44.06ms +[2025-09-06 02:43:20] [Rank 0] step:4761/10000 train_time:209787ms step_avg:44.06ms +[2025-09-06 02:43:20] [Rank 0] step:4781/10000 train_time:210525ms step_avg:44.03ms +[2025-09-06 02:43:20] [Rank 0] step:4781/10000 train_time:210525ms step_avg:44.03ms +[2025-09-06 02:43:21] [Rank 0] step:4801/10000 train_time:211396ms step_avg:44.03ms +[2025-09-06 02:43:21] [Rank 0] step:4801/10000 train_time:211396ms step_avg:44.03ms +[2025-09-06 02:43:22] [Rank 0] step:4821/10000 train_time:212135ms step_avg:44.00ms +[2025-09-06 02:43:22] [Rank 0] step:4821/10000 train_time:212135ms step_avg:44.00ms +[2025-09-06 02:43:23] [Rank 0] step:4841/10000 train_time:213181ms step_avg:44.04ms +[2025-09-06 02:43:23] [Rank 0] step:4841/10000 train_time:213181ms step_avg:44.04ms +[2025-09-06 02:43:24] [Rank 0] step:4861/10000 train_time:213918ms step_avg:44.01ms +[2025-09-06 02:43:24] [Rank 0] step:4861/10000 train_time:213918ms step_avg:44.01ms +[2025-09-06 02:43:24] [Rank 0] step:4881/10000 train_time:214656ms step_avg:43.98ms +[2025-09-06 02:43:24] [Rank 0] step:4881/10000 train_time:214656ms step_avg:43.98ms +[2025-09-06 02:43:25] [Rank 0] step:4901/10000 train_time:215393ms step_avg:43.95ms +[2025-09-06 02:43:25] [Rank 0] step:4901/10000 train_time:215393ms step_avg:43.95ms +[2025-09-06 02:43:26] [Rank 0] step:4921/10000 train_time:216131ms step_avg:43.92ms +[2025-09-06 02:43:26] [Rank 0] step:4921/10000 train_time:216131ms step_avg:43.92ms +[2025-09-06 02:43:27] [Rank 0] step:4941/10000 train_time:216869ms step_avg:43.89ms +[2025-09-06 02:43:27] [Rank 0] step:4941/10000 train_time:216869ms step_avg:43.89ms +[2025-09-06 02:43:27] [Rank 0] step:4961/10000 train_time:217606ms step_avg:43.86ms +[2025-09-06 02:43:27] [Rank 0] step:4961/10000 train_time:217606ms step_avg:43.86ms +[2025-09-06 02:43:28] [Rank 0] step:4981/10000 train_time:218344ms step_avg:43.84ms +[2025-09-06 02:43:28] [Rank 0] step:4981/10000 train_time:218344ms step_avg:43.84ms +[2025-09-06 02:43:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:43:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 02:43:29] [Rank 0] PRINT: step:5000/10000 train_loss:2.7829 val_loss:2.7478 train_time:219163ms step_avg:43.83ms +[2025-09-06 02:43:29] [Rank 0] PRINT: step:5000/10000 train_loss:2.7829 val_loss:2.7478 train_time:219163ms step_avg:43.83ms +[2025-09-06 02:43:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:43:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:43:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:43:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:44:51] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:44:51] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:44:51] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:44:51] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:44:51] [Rank 0] Total Loss: 5.0002 +[2025-09-06 02:44:51] [Rank 0] Total Loss: 5.0002 +[2025-09-06 02:44:51] [Rank 0] Total FTA (Unweighted): 0.1800 +[2025-09-06 02:44:51] [Rank 0] Total FTA (Unweighted): 0.1800 +[2025-09-06 02:44:51] [Rank 0] Total FTA (Weighted): 0.1800 +[2025-09-06 02:44:51] [Rank 0] Total FTA (Weighted): 0.1800 +[2025-09-06 02:44:51] [Rank 0] Group 0 Loss: 3.2456 +[2025-09-06 02:44:51] [Rank 0] Group 0 Loss: 3.2456 +[2025-09-06 02:44:51] [Rank 0] Group 1 Loss: 3.1917 +[2025-09-06 02:44:51] [Rank 0] Group 1 Loss: 3.1917 +[2025-09-06 02:44:51] [Rank 0] Group 2 Loss: 3.4016 +[2025-09-06 02:44:51] [Rank 0] Group 2 Loss: 3.4016 +[2025-09-06 02:44:51] [Rank 0] Group 3 Loss: 3.8173 +[2025-09-06 02:44:51] [Rank 0] Group 3 Loss: 3.8173 +[2025-09-06 02:44:51] [Rank 0] Group 4 Loss: 4.5613 +[2025-09-06 02:44:51] [Rank 0] Group 4 Loss: 4.5613 +[2025-09-06 02:44:51] [Rank 0] Group 5 Loss: 4.9915 +[2025-09-06 02:44:51] [Rank 0] Group 5 Loss: 4.9915 +[2025-09-06 02:44:51] [Rank 0] Group 6 Loss: 5.2942 +[2025-09-06 02:44:51] [Rank 0] Group 6 Loss: 5.2942 +[2025-09-06 02:44:51] [Rank 0] Group 7 Loss: 5.3889 +[2025-09-06 02:44:51] [Rank 0] Group 7 Loss: 5.3889 +[2025-09-06 02:44:51] [Rank 0] Group 8 Loss: 5.6691 +[2025-09-06 02:44:51] [Rank 0] Group 8 Loss: 5.6691 +[2025-09-06 02:44:51] [Rank 0] Group 9 Loss: 5.8196 +[2025-09-06 02:44:51] [Rank 0] Group 9 Loss: 5.8196 +[2025-09-06 02:44:51] [Rank 0] Group 10 Loss: 5.8110 +[2025-09-06 02:44:51] [Rank 0] Group 10 Loss: 5.8110 +[2025-09-06 02:44:51] [Rank 0] Group 11 Loss: 5.8694 +[2025-09-06 02:44:51] [Rank 0] Group 11 Loss: 5.8694 +[2025-09-06 02:44:51] [Rank 0] Group 12 Loss: 5.7107 +[2025-09-06 02:44:51] [Rank 0] Group 12 Loss: 5.7107 +[2025-09-06 02:44:51] [Rank 0] Group 13 Loss: 5.7370 +[2025-09-06 02:44:51] [Rank 0] Group 13 Loss: 5.7370 +[2025-09-06 02:44:51] [Rank 0] Group 14 Loss: 5.7799 +[2025-09-06 02:44:51] [Rank 0] Group 14 Loss: 5.7799 +[2025-09-06 02:44:51] [Rank 0] Group 15 Loss: 5.7145 +[2025-09-06 02:44:51] [Rank 0] Group 15 Loss: 5.7145 +[2025-09-06 02:44:51] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:44:51] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:44:51] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:44:51] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:44:51] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:44:51] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:44:51] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:44:51] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:44:51] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:44:51] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:44:51] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:44:51] [Rank 0] Group 5 FTA: 
0.1800 +[2025-09-06 02:44:51] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:44:51] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:44:51] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:44:51] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:44:51] [Rank 0] Group 8 FTA: 0.1800 +[2025-09-06 02:44:51] [Rank 0] Group 8 FTA: 0.1800 +[2025-09-06 02:44:51] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:44:51] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:44:51] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 02:44:51] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 02:44:51] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:44:51] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:44:51] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:44:51] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:44:51] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 02:44:51] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 02:44:51] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 02:44:51] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 02:44:51] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:44:51] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:44:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:44:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:44:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:44:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:44:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:44:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:44:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:44:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:44:52] [Rank 0] step:5001/10000 train_time:219172ms step_avg:43.83ms +[2025-09-06 02:44:52] [Rank 0] step:5001/10000 train_time:219172ms step_avg:43.83ms +[2025-09-06 02:44:53] [Rank 0] step:5021/10000 train_time:219851ms step_avg:43.79ms +[2025-09-06 02:44:53] [Rank 0] step:5021/10000 train_time:219851ms step_avg:43.79ms +[2025-09-06 02:44:54] [Rank 0] step:5041/10000 train_time:220589ms step_avg:43.76ms +[2025-09-06 02:44:54] [Rank 0] step:5041/10000 train_time:220589ms step_avg:43.76ms +[2025-09-06 02:44:55] [Rank 0] step:5061/10000 train_time:221327ms step_avg:43.73ms +[2025-09-06 02:44:55] [Rank 0] step:5061/10000 train_time:221327ms step_avg:43.73ms +[2025-09-06 02:44:55] [Rank 0] step:5081/10000 train_time:222064ms step_avg:43.70ms +[2025-09-06 02:44:55] [Rank 0] step:5081/10000 train_time:222064ms step_avg:43.70ms +[2025-09-06 02:44:56] [Rank 0] step:5101/10000 train_time:222803ms step_avg:43.68ms +[2025-09-06 02:44:56] [Rank 0] step:5101/10000 train_time:222803ms step_avg:43.68ms +[2025-09-06 02:44:57] [Rank 0] step:5121/10000 train_time:223540ms step_avg:43.65ms +[2025-09-06 
02:44:57] [Rank 0] step:5121/10000 train_time:223540ms step_avg:43.65ms +[2025-09-06 02:44:57] [Rank 0] step:5141/10000 train_time:224278ms step_avg:43.63ms +[2025-09-06 02:44:57] [Rank 0] step:5141/10000 train_time:224278ms step_avg:43.63ms +[2025-09-06 02:44:58] [Rank 0] step:5161/10000 train_time:225016ms step_avg:43.60ms +[2025-09-06 02:44:58] [Rank 0] step:5161/10000 train_time:225016ms step_avg:43.60ms +[2025-09-06 02:44:59] [Rank 0] step:5181/10000 train_time:225754ms step_avg:43.57ms +[2025-09-06 02:44:59] [Rank 0] step:5181/10000 train_time:225754ms step_avg:43.57ms +[2025-09-06 02:45:00] [Rank 0] step:5201/10000 train_time:226491ms step_avg:43.55ms +[2025-09-06 02:45:00] [Rank 0] step:5201/10000 train_time:226491ms step_avg:43.55ms +[2025-09-06 02:45:00] [Rank 0] step:5221/10000 train_time:227229ms step_avg:43.52ms +[2025-09-06 02:45:00] [Rank 0] step:5221/10000 train_time:227229ms step_avg:43.52ms +[2025-09-06 02:45:01] [Rank 0] step:5241/10000 train_time:227967ms step_avg:43.50ms +[2025-09-06 02:45:01] [Rank 0] step:5241/10000 train_time:227967ms step_avg:43.50ms +[2025-09-06 02:45:02] [Rank 0] step:5261/10000 train_time:228706ms step_avg:43.47ms +[2025-09-06 02:45:02] [Rank 0] step:5261/10000 train_time:228706ms step_avg:43.47ms +[2025-09-06 02:45:03] [Rank 0] step:5281/10000 train_time:229462ms step_avg:43.45ms +[2025-09-06 02:45:03] [Rank 0] step:5281/10000 train_time:229462ms step_avg:43.45ms +[2025-09-06 02:45:03] [Rank 0] step:5301/10000 train_time:230200ms step_avg:43.43ms +[2025-09-06 02:45:03] [Rank 0] step:5301/10000 train_time:230200ms step_avg:43.43ms +[2025-09-06 02:45:04] [Rank 0] step:5321/10000 train_time:230937ms step_avg:43.40ms +[2025-09-06 02:45:04] [Rank 0] step:5321/10000 train_time:230937ms step_avg:43.40ms +[2025-09-06 02:45:05] [Rank 0] step:5341/10000 train_time:231677ms step_avg:43.38ms +[2025-09-06 02:45:05] [Rank 0] step:5341/10000 train_time:231677ms step_avg:43.38ms +[2025-09-06 02:45:06] [Rank 0] step:5361/10000 train_time:232415ms step_avg:43.35ms +[2025-09-06 02:45:06] [Rank 0] step:5361/10000 train_time:232415ms step_avg:43.35ms +[2025-09-06 02:45:06] [Rank 0] step:5381/10000 train_time:233153ms step_avg:43.33ms +[2025-09-06 02:45:06] [Rank 0] step:5381/10000 train_time:233153ms step_avg:43.33ms +[2025-09-06 02:45:07] [Rank 0] step:5401/10000 train_time:233891ms step_avg:43.31ms +[2025-09-06 02:45:07] [Rank 0] step:5401/10000 train_time:233891ms step_avg:43.31ms +[2025-09-06 02:45:08] [Rank 0] step:5421/10000 train_time:234629ms step_avg:43.28ms +[2025-09-06 02:45:08] [Rank 0] step:5421/10000 train_time:234629ms step_avg:43.28ms +[2025-09-06 02:45:09] [Rank 0] step:5441/10000 train_time:235367ms step_avg:43.26ms +[2025-09-06 02:45:09] [Rank 0] step:5441/10000 train_time:235367ms step_avg:43.26ms +[2025-09-06 02:45:09] [Rank 0] step:5461/10000 train_time:236105ms step_avg:43.23ms +[2025-09-06 02:45:09] [Rank 0] step:5461/10000 train_time:236105ms step_avg:43.23ms +[2025-09-06 02:45:10] [Rank 0] step:5481/10000 train_time:236842ms step_avg:43.21ms +[2025-09-06 02:45:10] [Rank 0] step:5481/10000 train_time:236842ms step_avg:43.21ms +[2025-09-06 02:45:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:45:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 02:45:11] [Rank 0] PRINT: step:5500/10000 train_loss:2.7324 val_loss:2.7017 train_time:237661ms step_avg:43.21ms +[2025-09-06 02:45:11] [Rank 0] PRINT: step:5500/10000 train_loss:2.7324 val_loss:2.7017 train_time:237661ms step_avg:43.21ms +[2025-09-06 02:45:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:45:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:45:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:45:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:46:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:46:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:46:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:46:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:46:33] [Rank 0] Total Loss: 4.9434 +[2025-09-06 02:46:33] [Rank 0] Total Loss: 4.9434 +[2025-09-06 02:46:33] [Rank 0] Total FTA (Unweighted): 0.1856 +[2025-09-06 02:46:33] [Rank 0] Total FTA (Unweighted): 0.1856 +[2025-09-06 02:46:33] [Rank 0] Total FTA (Weighted): 0.1856 +[2025-09-06 02:46:33] [Rank 0] Total FTA (Weighted): 0.1856 +[2025-09-06 02:46:33] [Rank 0] Group 0 Loss: 3.2021 +[2025-09-06 02:46:33] [Rank 0] Group 0 Loss: 3.2021 +[2025-09-06 02:46:33] [Rank 0] Group 1 Loss: 3.1335 +[2025-09-06 02:46:33] [Rank 0] Group 1 Loss: 3.1335 +[2025-09-06 02:46:33] [Rank 0] Group 2 Loss: 3.2932 +[2025-09-06 02:46:33] [Rank 0] Group 2 Loss: 3.2932 +[2025-09-06 02:46:33] [Rank 0] Group 3 Loss: 3.7939 +[2025-09-06 02:46:33] [Rank 0] Group 3 Loss: 3.7939 +[2025-09-06 02:46:33] [Rank 0] Group 4 Loss: 4.4839 +[2025-09-06 02:46:33] [Rank 0] Group 4 Loss: 4.4839 +[2025-09-06 02:46:33] [Rank 0] Group 5 Loss: 4.9319 +[2025-09-06 02:46:33] [Rank 0] Group 5 Loss: 4.9319 +[2025-09-06 02:46:33] [Rank 0] Group 6 Loss: 5.2392 +[2025-09-06 02:46:33] [Rank 0] Group 6 Loss: 5.2392 +[2025-09-06 02:46:33] [Rank 0] Group 7 Loss: 5.3439 +[2025-09-06 02:46:33] [Rank 0] Group 7 Loss: 5.3439 +[2025-09-06 02:46:33] [Rank 0] Group 8 Loss: 5.6058 +[2025-09-06 02:46:33] [Rank 0] Group 8 Loss: 5.6058 +[2025-09-06 02:46:33] [Rank 0] Group 9 Loss: 5.7579 +[2025-09-06 02:46:33] [Rank 0] Group 9 Loss: 5.7579 +[2025-09-06 02:46:33] [Rank 0] Group 10 Loss: 5.7502 +[2025-09-06 02:46:33] [Rank 0] Group 10 Loss: 5.7502 +[2025-09-06 02:46:33] [Rank 0] Group 11 Loss: 5.8060 +[2025-09-06 02:46:33] [Rank 0] Group 11 Loss: 5.8060 +[2025-09-06 02:46:33] [Rank 0] Group 12 Loss: 5.6650 +[2025-09-06 02:46:33] [Rank 0] Group 12 Loss: 5.6650 +[2025-09-06 02:46:33] [Rank 0] Group 13 Loss: 5.6802 +[2025-09-06 02:46:33] [Rank 0] Group 13 Loss: 5.6802 +[2025-09-06 02:46:33] [Rank 0] Group 14 Loss: 5.7258 +[2025-09-06 02:46:33] [Rank 0] Group 14 Loss: 5.7258 +[2025-09-06 02:46:33] [Rank 0] Group 15 Loss: 5.6820 +[2025-09-06 02:46:33] [Rank 0] Group 15 Loss: 5.6820 +[2025-09-06 02:46:33] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:46:33] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:46:33] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:46:33] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:46:33] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:46:33] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:46:33] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:46:33] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:46:33] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 02:46:33] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 02:46:33] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:46:33] [Rank 0] Group 5 FTA: 
0.1800 +[2025-09-06 02:46:33] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:46:33] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:46:33] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:46:33] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:46:33] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 02:46:33] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 02:46:33] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 02:46:33] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 02:46:33] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 02:46:33] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 02:46:33] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:46:33] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 02:46:33] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:46:33] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:46:33] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-06 02:46:33] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-06 02:46:33] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 02:46:33] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 02:46:33] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:46:33] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:46:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:46:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:46:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:46:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:46:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:46:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:46:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:46:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:46:34] [Rank 0] step:5501/10000 train_time:237670ms step_avg:43.20ms +[2025-09-06 02:46:34] [Rank 0] step:5501/10000 train_time:237670ms step_avg:43.20ms +[2025-09-06 02:46:35] [Rank 0] step:5521/10000 train_time:238346ms step_avg:43.17ms +[2025-09-06 02:46:35] [Rank 0] step:5521/10000 train_time:238346ms step_avg:43.17ms +[2025-09-06 02:46:36] [Rank 0] step:5541/10000 train_time:239085ms step_avg:43.15ms +[2025-09-06 02:46:36] [Rank 0] step:5541/10000 train_time:239085ms step_avg:43.15ms +[2025-09-06 02:46:36] [Rank 0] step:5561/10000 train_time:239824ms step_avg:43.13ms +[2025-09-06 02:46:36] [Rank 0] step:5561/10000 train_time:239824ms step_avg:43.13ms +[2025-09-06 02:46:37] [Rank 0] step:5581/10000 train_time:240562ms step_avg:43.10ms +[2025-09-06 02:46:37] [Rank 0] step:5581/10000 train_time:240562ms step_avg:43.10ms +[2025-09-06 02:46:38] [Rank 0] step:5601/10000 train_time:241301ms step_avg:43.08ms +[2025-09-06 02:46:38] [Rank 0] step:5601/10000 train_time:241301ms step_avg:43.08ms +[2025-09-06 02:46:39] [Rank 0] step:5621/10000 train_time:242039ms step_avg:43.06ms +[2025-09-06 
02:46:39] [Rank 0] step:5621/10000 train_time:242039ms step_avg:43.06ms +[2025-09-06 02:46:40] [Rank 0] step:5641/10000 train_time:243398ms step_avg:43.15ms +[2025-09-06 02:46:40] [Rank 0] step:5641/10000 train_time:243398ms step_avg:43.15ms +[2025-09-06 02:46:41] [Rank 0] step:5661/10000 train_time:244138ms step_avg:43.13ms +[2025-09-06 02:46:41] [Rank 0] step:5661/10000 train_time:244138ms step_avg:43.13ms +[2025-09-06 02:46:41] [Rank 0] step:5681/10000 train_time:244877ms step_avg:43.10ms +[2025-09-06 02:46:41] [Rank 0] step:5681/10000 train_time:244877ms step_avg:43.10ms +[2025-09-06 02:46:42] [Rank 0] step:5701/10000 train_time:245617ms step_avg:43.08ms +[2025-09-06 02:46:42] [Rank 0] step:5701/10000 train_time:245617ms step_avg:43.08ms +[2025-09-06 02:46:43] [Rank 0] step:5721/10000 train_time:246356ms step_avg:43.06ms +[2025-09-06 02:46:43] [Rank 0] step:5721/10000 train_time:246356ms step_avg:43.06ms +[2025-09-06 02:46:44] [Rank 0] step:5741/10000 train_time:247095ms step_avg:43.04ms +[2025-09-06 02:46:44] [Rank 0] step:5741/10000 train_time:247095ms step_avg:43.04ms +[2025-09-06 02:46:44] [Rank 0] step:5761/10000 train_time:247833ms step_avg:43.02ms +[2025-09-06 02:46:44] [Rank 0] step:5761/10000 train_time:247833ms step_avg:43.02ms +[2025-09-06 02:46:45] [Rank 0] step:5781/10000 train_time:248571ms step_avg:43.00ms +[2025-09-06 02:46:45] [Rank 0] step:5781/10000 train_time:248571ms step_avg:43.00ms +[2025-09-06 02:46:46] [Rank 0] step:5801/10000 train_time:249310ms step_avg:42.98ms +[2025-09-06 02:46:46] [Rank 0] step:5801/10000 train_time:249310ms step_avg:42.98ms +[2025-09-06 02:46:47] [Rank 0] step:5821/10000 train_time:250050ms step_avg:42.96ms +[2025-09-06 02:46:47] [Rank 0] step:5821/10000 train_time:250050ms step_avg:42.96ms +[2025-09-06 02:46:47] [Rank 0] step:5841/10000 train_time:250788ms step_avg:42.94ms +[2025-09-06 02:46:47] [Rank 0] step:5841/10000 train_time:250788ms step_avg:42.94ms +[2025-09-06 02:46:48] [Rank 0] step:5861/10000 train_time:251527ms step_avg:42.92ms +[2025-09-06 02:46:48] [Rank 0] step:5861/10000 train_time:251527ms step_avg:42.92ms +[2025-09-06 02:46:49] [Rank 0] step:5881/10000 train_time:252266ms step_avg:42.90ms +[2025-09-06 02:46:49] [Rank 0] step:5881/10000 train_time:252266ms step_avg:42.90ms +[2025-09-06 02:46:50] [Rank 0] step:5901/10000 train_time:253004ms step_avg:42.87ms +[2025-09-06 02:46:50] [Rank 0] step:5901/10000 train_time:253004ms step_avg:42.87ms +[2025-09-06 02:46:50] [Rank 0] step:5921/10000 train_time:253743ms step_avg:42.85ms +[2025-09-06 02:46:50] [Rank 0] step:5921/10000 train_time:253743ms step_avg:42.85ms +[2025-09-06 02:46:51] [Rank 0] step:5941/10000 train_time:254483ms step_avg:42.84ms +[2025-09-06 02:46:51] [Rank 0] step:5941/10000 train_time:254483ms step_avg:42.84ms +[2025-09-06 02:46:52] [Rank 0] step:5961/10000 train_time:255222ms step_avg:42.82ms +[2025-09-06 02:46:52] [Rank 0] step:5961/10000 train_time:255222ms step_avg:42.82ms +[2025-09-06 02:46:53] [Rank 0] step:5981/10000 train_time:255961ms step_avg:42.80ms +[2025-09-06 02:46:53] [Rank 0] step:5981/10000 train_time:255961ms step_avg:42.80ms +[2025-09-06 02:46:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:46:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 02:46:54] [Rank 0] PRINT: step:6000/10000 train_loss:2.6913 val_loss:2.6640 train_time:256781ms step_avg:42.80ms +[2025-09-06 02:46:54] [Rank 0] PRINT: step:6000/10000 train_loss:2.6913 val_loss:2.6640 train_time:256781ms step_avg:42.80ms +[2025-09-06 02:46:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:46:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:46:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:46:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:48:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:48:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:48:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:48:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:48:15] [Rank 0] Total Loss: 4.9472 +[2025-09-06 02:48:15] [Rank 0] Total Loss: 4.9472 +[2025-09-06 02:48:15] [Rank 0] Total FTA (Unweighted): 0.1931 +[2025-09-06 02:48:15] [Rank 0] Total FTA (Unweighted): 0.1931 +[2025-09-06 02:48:15] [Rank 0] Total FTA (Weighted): 0.1931 +[2025-09-06 02:48:15] [Rank 0] Total FTA (Weighted): 0.1931 +[2025-09-06 02:48:15] [Rank 0] Group 0 Loss: 3.2184 +[2025-09-06 02:48:15] [Rank 0] Group 0 Loss: 3.2184 +[2025-09-06 02:48:15] [Rank 0] Group 1 Loss: 3.1917 +[2025-09-06 02:48:15] [Rank 0] Group 1 Loss: 3.1917 +[2025-09-06 02:48:15] [Rank 0] Group 2 Loss: 3.3141 +[2025-09-06 02:48:15] [Rank 0] Group 2 Loss: 3.3141 +[2025-09-06 02:48:15] [Rank 0] Group 3 Loss: 3.8363 +[2025-09-06 02:48:15] [Rank 0] Group 3 Loss: 3.8363 +[2025-09-06 02:48:15] [Rank 0] Group 4 Loss: 4.4877 +[2025-09-06 02:48:15] [Rank 0] Group 4 Loss: 4.4877 +[2025-09-06 02:48:15] [Rank 0] Group 5 Loss: 4.9201 +[2025-09-06 02:48:15] [Rank 0] Group 5 Loss: 4.9201 +[2025-09-06 02:48:15] [Rank 0] Group 6 Loss: 5.2295 +[2025-09-06 02:48:15] [Rank 0] Group 6 Loss: 5.2295 +[2025-09-06 02:48:15] [Rank 0] Group 7 Loss: 5.3344 +[2025-09-06 02:48:15] [Rank 0] Group 7 Loss: 5.3344 +[2025-09-06 02:48:15] [Rank 0] Group 8 Loss: 5.5950 +[2025-09-06 02:48:15] [Rank 0] Group 8 Loss: 5.5950 +[2025-09-06 02:48:15] [Rank 0] Group 9 Loss: 5.7360 +[2025-09-06 02:48:15] [Rank 0] Group 9 Loss: 5.7360 +[2025-09-06 02:48:15] [Rank 0] Group 10 Loss: 5.7412 +[2025-09-06 02:48:15] [Rank 0] Group 10 Loss: 5.7412 +[2025-09-06 02:48:15] [Rank 0] Group 11 Loss: 5.8084 +[2025-09-06 02:48:15] [Rank 0] Group 11 Loss: 5.8084 +[2025-09-06 02:48:15] [Rank 0] Group 12 Loss: 5.6546 +[2025-09-06 02:48:15] [Rank 0] Group 12 Loss: 5.6546 +[2025-09-06 02:48:15] [Rank 0] Group 13 Loss: 5.6744 +[2025-09-06 02:48:15] [Rank 0] Group 13 Loss: 5.6744 +[2025-09-06 02:48:15] [Rank 0] Group 14 Loss: 5.7373 +[2025-09-06 02:48:15] [Rank 0] Group 14 Loss: 5.7373 +[2025-09-06 02:48:15] [Rank 0] Group 15 Loss: 5.6768 +[2025-09-06 02:48:15] [Rank 0] Group 15 Loss: 5.6768 +[2025-09-06 02:48:15] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:48:15] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:48:15] [Rank 0] Group 1 FTA: 0.3800 +[2025-09-06 02:48:15] [Rank 0] Group 1 FTA: 0.3800 +[2025-09-06 02:48:15] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:48:15] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:48:15] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:48:15] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:48:15] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:48:15] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:48:15] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:48:15] [Rank 0] Group 5 FTA: 
0.1800 +[2025-09-06 02:48:15] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:48:15] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:48:15] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:48:15] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:48:15] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 02:48:15] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 02:48:15] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 02:48:15] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 02:48:15] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 02:48:15] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 02:48:15] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:48:15] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:48:15] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:48:15] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:48:15] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-06 02:48:15] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-06 02:48:15] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:48:15] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:48:15] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:48:15] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 02:48:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:48:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:48:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:48:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:48:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:48:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:48:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:48:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:48:17] [Rank 0] step:6001/10000 train_time:256790ms step_avg:42.79ms +[2025-09-06 02:48:17] [Rank 0] step:6001/10000 train_time:256790ms step_avg:42.79ms +[2025-09-06 02:48:18] [Rank 0] step:6021/10000 train_time:258089ms step_avg:42.86ms +[2025-09-06 02:48:18] [Rank 0] step:6021/10000 train_time:258089ms step_avg:42.86ms +[2025-09-06 02:48:19] [Rank 0] step:6041/10000 train_time:258828ms step_avg:42.85ms +[2025-09-06 02:48:19] [Rank 0] step:6041/10000 train_time:258828ms step_avg:42.85ms +[2025-09-06 02:48:20] [Rank 0] step:6061/10000 train_time:259568ms step_avg:42.83ms +[2025-09-06 02:48:20] [Rank 0] step:6061/10000 train_time:259568ms step_avg:42.83ms +[2025-09-06 02:48:20] [Rank 0] step:6081/10000 train_time:260306ms step_avg:42.81ms +[2025-09-06 02:48:20] [Rank 0] step:6081/10000 train_time:260306ms step_avg:42.81ms +[2025-09-06 02:48:21] [Rank 0] step:6101/10000 train_time:261045ms step_avg:42.79ms +[2025-09-06 02:48:21] [Rank 0] step:6101/10000 train_time:261045ms step_avg:42.79ms +[2025-09-06 02:48:22] [Rank 0] step:6121/10000 train_time:261783ms step_avg:42.77ms +[2025-09-06 
02:48:22] [Rank 0] step:6121/10000 train_time:261783ms step_avg:42.77ms +[2025-09-06 02:48:23] [Rank 0] step:6141/10000 train_time:262523ms step_avg:42.75ms +[2025-09-06 02:48:23] [Rank 0] step:6141/10000 train_time:262523ms step_avg:42.75ms +[2025-09-06 02:48:23] [Rank 0] step:6161/10000 train_time:263261ms step_avg:42.73ms +[2025-09-06 02:48:23] [Rank 0] step:6161/10000 train_time:263261ms step_avg:42.73ms +[2025-09-06 02:48:24] [Rank 0] step:6181/10000 train_time:264001ms step_avg:42.71ms +[2025-09-06 02:48:24] [Rank 0] step:6181/10000 train_time:264001ms step_avg:42.71ms +[2025-09-06 02:48:25] [Rank 0] step:6201/10000 train_time:264739ms step_avg:42.69ms +[2025-09-06 02:48:25] [Rank 0] step:6201/10000 train_time:264739ms step_avg:42.69ms +[2025-09-06 02:48:25] [Rank 0] step:6221/10000 train_time:265477ms step_avg:42.67ms +[2025-09-06 02:48:25] [Rank 0] step:6221/10000 train_time:265477ms step_avg:42.67ms +[2025-09-06 02:48:26] [Rank 0] step:6241/10000 train_time:266221ms step_avg:42.66ms +[2025-09-06 02:48:26] [Rank 0] step:6241/10000 train_time:266221ms step_avg:42.66ms +[2025-09-06 02:48:27] [Rank 0] step:6261/10000 train_time:266961ms step_avg:42.64ms +[2025-09-06 02:48:27] [Rank 0] step:6261/10000 train_time:266961ms step_avg:42.64ms +[2025-09-06 02:48:28] [Rank 0] step:6281/10000 train_time:267700ms step_avg:42.62ms +[2025-09-06 02:48:28] [Rank 0] step:6281/10000 train_time:267700ms step_avg:42.62ms +[2025-09-06 02:48:28] [Rank 0] step:6301/10000 train_time:268439ms step_avg:42.60ms +[2025-09-06 02:48:28] [Rank 0] step:6301/10000 train_time:268439ms step_avg:42.60ms +[2025-09-06 02:48:29] [Rank 0] step:6321/10000 train_time:269177ms step_avg:42.58ms +[2025-09-06 02:48:29] [Rank 0] step:6321/10000 train_time:269177ms step_avg:42.58ms +[2025-09-06 02:48:30] [Rank 0] step:6341/10000 train_time:269916ms step_avg:42.57ms +[2025-09-06 02:48:30] [Rank 0] step:6341/10000 train_time:269916ms step_avg:42.57ms +[2025-09-06 02:48:31] [Rank 0] step:6361/10000 train_time:270657ms step_avg:42.55ms +[2025-09-06 02:48:31] [Rank 0] step:6361/10000 train_time:270657ms step_avg:42.55ms +[2025-09-06 02:48:31] [Rank 0] step:6381/10000 train_time:271396ms step_avg:42.53ms +[2025-09-06 02:48:31] [Rank 0] step:6381/10000 train_time:271396ms step_avg:42.53ms +[2025-09-06 02:48:32] [Rank 0] step:6401/10000 train_time:272134ms step_avg:42.51ms +[2025-09-06 02:48:32] [Rank 0] step:6401/10000 train_time:272134ms step_avg:42.51ms +[2025-09-06 02:48:33] [Rank 0] step:6421/10000 train_time:272873ms step_avg:42.50ms +[2025-09-06 02:48:33] [Rank 0] step:6421/10000 train_time:272873ms step_avg:42.50ms +[2025-09-06 02:48:34] [Rank 0] step:6441/10000 train_time:273612ms step_avg:42.48ms +[2025-09-06 02:48:34] [Rank 0] step:6441/10000 train_time:273612ms step_avg:42.48ms +[2025-09-06 02:48:34] [Rank 0] step:6461/10000 train_time:274511ms step_avg:42.49ms +[2025-09-06 02:48:34] [Rank 0] step:6461/10000 train_time:274511ms step_avg:42.49ms +[2025-09-06 02:48:35] [Rank 0] step:6481/10000 train_time:275250ms step_avg:42.47ms +[2025-09-06 02:48:35] [Rank 0] step:6481/10000 train_time:275250ms step_avg:42.47ms +[2025-09-06 02:48:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:48:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 02:48:36] [Rank 0] PRINT: step:6500/10000 train_loss:2.6573 val_loss:2.6317 train_time:276070ms step_avg:42.47ms +[2025-09-06 02:48:36] [Rank 0] PRINT: step:6500/10000 train_loss:2.6573 val_loss:2.6317 train_time:276070ms step_avg:42.47ms +[2025-09-06 02:48:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:48:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 02:48:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:48:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:49:58] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:49:58] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:49:58] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:49:58] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:49:58] [Rank 0] Total Loss: 4.9102 +[2025-09-06 02:49:58] [Rank 0] Total Loss: 4.9102 +[2025-09-06 02:49:58] [Rank 0] Total FTA (Unweighted): 0.1919 +[2025-09-06 02:49:58] [Rank 0] Total FTA (Unweighted): 0.1919 +[2025-09-06 02:49:58] [Rank 0] Total FTA (Weighted): 0.1919 +[2025-09-06 02:49:58] [Rank 0] Total FTA (Weighted): 0.1919 +[2025-09-06 02:49:58] [Rank 0] Group 0 Loss: 3.2138 +[2025-09-06 02:49:58] [Rank 0] Group 0 Loss: 3.2138 +[2025-09-06 02:49:58] [Rank 0] Group 1 Loss: 3.1844 +[2025-09-06 02:49:58] [Rank 0] Group 1 Loss: 3.1844 +[2025-09-06 02:49:59] [Rank 0] Group 2 Loss: 3.3102 +[2025-09-06 02:49:59] [Rank 0] Group 2 Loss: 3.3102 +[2025-09-06 02:49:59] [Rank 0] Group 3 Loss: 3.8045 +[2025-09-06 02:49:59] [Rank 0] Group 3 Loss: 3.8045 +[2025-09-06 02:49:59] [Rank 0] Group 4 Loss: 4.4350 +[2025-09-06 02:49:59] [Rank 0] Group 4 Loss: 4.4350 +[2025-09-06 02:49:59] [Rank 0] Group 5 Loss: 4.8779 +[2025-09-06 02:49:59] [Rank 0] Group 5 Loss: 4.8779 +[2025-09-06 02:49:59] [Rank 0] Group 6 Loss: 5.1898 +[2025-09-06 02:49:59] [Rank 0] Group 6 Loss: 5.1898 +[2025-09-06 02:49:59] [Rank 0] Group 7 Loss: 5.2785 +[2025-09-06 02:49:59] [Rank 0] Group 7 Loss: 5.2785 +[2025-09-06 02:49:59] [Rank 0] Group 8 Loss: 5.5668 +[2025-09-06 02:49:59] [Rank 0] Group 8 Loss: 5.5668 +[2025-09-06 02:49:59] [Rank 0] Group 9 Loss: 5.7031 +[2025-09-06 02:49:59] [Rank 0] Group 9 Loss: 5.7031 +[2025-09-06 02:49:59] [Rank 0] Group 10 Loss: 5.6982 +[2025-09-06 02:49:59] [Rank 0] Group 10 Loss: 5.6982 +[2025-09-06 02:49:59] [Rank 0] Group 11 Loss: 5.7358 +[2025-09-06 02:49:59] [Rank 0] Group 11 Loss: 5.7358 +[2025-09-06 02:49:59] [Rank 0] Group 12 Loss: 5.6140 +[2025-09-06 02:49:59] [Rank 0] Group 12 Loss: 5.6140 +[2025-09-06 02:49:59] [Rank 0] Group 13 Loss: 5.6354 +[2025-09-06 02:49:59] [Rank 0] Group 13 Loss: 5.6354 +[2025-09-06 02:49:59] [Rank 0] Group 14 Loss: 5.6878 +[2025-09-06 02:49:59] [Rank 0] Group 14 Loss: 5.6878 +[2025-09-06 02:49:59] [Rank 0] Group 15 Loss: 5.6274 +[2025-09-06 02:49:59] [Rank 0] Group 15 Loss: 5.6274 +[2025-09-06 02:49:59] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:49:59] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 02:49:59] [Rank 0] Group 1 FTA: 0.3800 +[2025-09-06 02:49:59] [Rank 0] Group 1 FTA: 0.3800 +[2025-09-06 02:49:59] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:49:59] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:49:59] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:49:59] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 02:49:59] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:49:59] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 02:49:59] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 02:49:59] [Rank 0] Group 5 FTA: 
0.1800 +[2025-09-06 02:49:59] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:49:59] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 02:49:59] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:49:59] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 02:49:59] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 02:49:59] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 02:49:59] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:49:59] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 02:49:59] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 02:49:59] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 02:49:59] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:49:59] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 02:49:59] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:49:59] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 02:49:59] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-06 02:49:59] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-06 02:49:59] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:49:59] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 02:49:59] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-06 02:49:59] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-06 02:49:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:49:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png +[2025-09-06 02:49:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:49:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png +[2025-09-06 02:50:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:50:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png +[2025-09-06 02:50:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:50:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png +[2025-09-06 02:50:00] [Rank 0] step:6501/10000 train_time:276079ms step_avg:42.47ms +[2025-09-06 02:50:00] [Rank 0] step:6501/10000 train_time:276079ms step_avg:42.47ms +[2025-09-06 02:50:01] [Rank 0] step:6521/10000 train_time:276754ms step_avg:42.44ms +[2025-09-06 02:50:01] [Rank 0] step:6521/10000 train_time:276754ms step_avg:42.44ms +[2025-09-06 02:50:02] [Rank 0] step:6541/10000 train_time:277503ms step_avg:42.43ms +[2025-09-06 02:50:02] [Rank 0] step:6541/10000 train_time:277503ms step_avg:42.43ms +[2025-09-06 02:50:02] [Rank 0] step:6561/10000 train_time:278244ms step_avg:42.41ms +[2025-09-06 02:50:02] [Rank 0] step:6561/10000 train_time:278244ms step_avg:42.41ms +[2025-09-06 02:50:03] [Rank 0] step:6581/10000 train_time:278984ms step_avg:42.39ms +[2025-09-06 02:50:03] [Rank 0] step:6581/10000 train_time:278984ms step_avg:42.39ms +[2025-09-06 02:50:04] [Rank 0] step:6601/10000 train_time:279724ms step_avg:42.38ms +[2025-09-06 02:50:04] [Rank 0] step:6601/10000 train_time:279724ms step_avg:42.38ms +[2025-09-06 02:50:05] [Rank 0] step:6621/10000 train_time:280463ms step_avg:42.36ms +[2025-09-06 
02:50:05] [Rank 0] step:6621/10000 train_time:280463ms step_avg:42.36ms +[2025-09-06 02:50:05] [Rank 0] step:6641/10000 train_time:281203ms step_avg:42.34ms +[2025-09-06 02:50:05] [Rank 0] step:6641/10000 train_time:281203ms step_avg:42.34ms +[2025-09-06 02:50:06] [Rank 0] step:6661/10000 train_time:281943ms step_avg:42.33ms +[2025-09-06 02:50:06] [Rank 0] step:6661/10000 train_time:281943ms step_avg:42.33ms +[2025-09-06 02:50:07] [Rank 0] step:6681/10000 train_time:282682ms step_avg:42.31ms +[2025-09-06 02:50:07] [Rank 0] step:6681/10000 train_time:282682ms step_avg:42.31ms +[2025-09-06 02:50:07] [Rank 0] step:6701/10000 train_time:283421ms step_avg:42.30ms +[2025-09-06 02:50:07] [Rank 0] step:6701/10000 train_time:283421ms step_avg:42.30ms +[2025-09-06 02:50:08] [Rank 0] step:6721/10000 train_time:284160ms step_avg:42.28ms +[2025-09-06 02:50:08] [Rank 0] step:6721/10000 train_time:284160ms step_avg:42.28ms +[2025-09-06 02:50:09] [Rank 0] step:6741/10000 train_time:284899ms step_avg:42.26ms +[2025-09-06 02:50:09] [Rank 0] step:6741/10000 train_time:284899ms step_avg:42.26ms +[2025-09-06 02:50:10] [Rank 0] step:6761/10000 train_time:285637ms step_avg:42.25ms +[2025-09-06 02:50:10] [Rank 0] step:6761/10000 train_time:285637ms step_avg:42.25ms +[2025-09-06 02:50:10] [Rank 0] step:6781/10000 train_time:286375ms step_avg:42.23ms +[2025-09-06 02:50:10] [Rank 0] step:6781/10000 train_time:286375ms step_avg:42.23ms +[2025-09-06 02:50:11] [Rank 0] step:6801/10000 train_time:287114ms step_avg:42.22ms +[2025-09-06 02:50:11] [Rank 0] step:6801/10000 train_time:287114ms step_avg:42.22ms +[2025-09-06 02:50:12] [Rank 0] step:6821/10000 train_time:287853ms step_avg:42.20ms +[2025-09-06 02:50:12] [Rank 0] step:6821/10000 train_time:287853ms step_avg:42.20ms +[2025-09-06 02:50:13] [Rank 0] step:6841/10000 train_time:289208ms step_avg:42.28ms +[2025-09-06 02:50:13] [Rank 0] step:6841/10000 train_time:289208ms step_avg:42.28ms +[2025-09-06 02:50:14] [Rank 0] step:6861/10000 train_time:289947ms step_avg:42.26ms +[2025-09-06 02:50:14] [Rank 0] step:6861/10000 train_time:289947ms step_avg:42.26ms +[2025-09-06 02:50:15] [Rank 0] step:6881/10000 train_time:290687ms step_avg:42.24ms +[2025-09-06 02:50:15] [Rank 0] step:6881/10000 train_time:290687ms step_avg:42.24ms +[2025-09-06 02:50:15] [Rank 0] step:6901/10000 train_time:291425ms step_avg:42.23ms +[2025-09-06 02:50:15] [Rank 0] step:6901/10000 train_time:291425ms step_avg:42.23ms +[2025-09-06 02:50:16] [Rank 0] step:6921/10000 train_time:292166ms step_avg:42.21ms +[2025-09-06 02:50:16] [Rank 0] step:6921/10000 train_time:292166ms step_avg:42.21ms +[2025-09-06 02:50:17] [Rank 0] step:6941/10000 train_time:292905ms step_avg:42.20ms +[2025-09-06 02:50:17] [Rank 0] step:6941/10000 train_time:292905ms step_avg:42.20ms +[2025-09-06 02:50:18] [Rank 0] step:6961/10000 train_time:293643ms step_avg:42.18ms +[2025-09-06 02:50:18] [Rank 0] step:6961/10000 train_time:293643ms step_avg:42.18ms +[2025-09-06 02:50:18] [Rank 0] step:6981/10000 train_time:294382ms step_avg:42.17ms +[2025-09-06 02:50:18] [Rank 0] step:6981/10000 train_time:294382ms step_avg:42.17ms +[2025-09-06 02:50:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 02:50:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 02:50:20] [Rank 0] PRINT: step:7000/10000 train_loss:2.6263 val_loss:2.6053 train_time:295202ms step_avg:42.17ms
+[2025-09-06 02:50:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:50:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:51:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:51:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:51:41] [Rank 0] Total Loss: 4.8915
+[2025-09-06 02:51:41] [Rank 0] Total FTA (Unweighted): 0.1956
+[2025-09-06 02:51:41] [Rank 0] Total FTA (Weighted): 0.1956
+[2025-09-06 02:51:41] [Rank 0] Group 0 Loss: 3.1948
+[2025-09-06 02:51:41] [Rank 0] Group 1 Loss: 3.1340
+[2025-09-06 02:51:41] [Rank 0] Group 2 Loss: 3.3369
+[2025-09-06 02:51:41] [Rank 0] Group 3 Loss: 3.7579
+[2025-09-06 02:51:41] [Rank 0] Group 4 Loss: 4.3990
+[2025-09-06 02:51:41] [Rank 0] Group 5 Loss: 4.8357
+[2025-09-06 02:51:41] [Rank 0] Group 6 Loss: 5.1639
+[2025-09-06 02:51:41] [Rank 0] Group 7 Loss: 5.2660
+[2025-09-06 02:51:41] [Rank 0] Group 8 Loss: 5.5589
+[2025-09-06 02:51:41] [Rank 0] Group 9 Loss: 5.6957
+[2025-09-06 02:51:41] [Rank 0] Group 10 Loss: 5.6892
+[2025-09-06 02:51:41] [Rank 0] Group 11 Loss: 5.7190
+[2025-09-06 02:51:41] [Rank 0] Group 12 Loss: 5.5995
+[2025-09-06 02:51:41] [Rank 0] Group 13 Loss: 5.6197
+[2025-09-06 02:51:41] [Rank 0] Group 14 Loss: 5.6699
+[2025-09-06 02:51:41] [Rank 0] Group 15 Loss: 5.6240
+[2025-09-06 02:51:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:51:41] [Rank 0] Group 1 FTA: 0.3800
+[2025-09-06 02:51:41] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:51:41] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 02:51:41] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 02:51:41] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 02:51:41] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 02:51:41] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 02:51:41] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 02:51:41] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 02:51:41] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 02:51:41] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 02:51:41] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 02:51:41] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 02:51:41] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 02:51:41] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 02:51:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 02:51:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 02:51:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 02:51:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 02:51:43] [Rank 0] step:7001/10000 train_time:295211ms step_avg:42.17ms
+[2025-09-06 02:51:43] [Rank 0] step:7021/10000 train_time:295888ms step_avg:42.14ms
+[2025-09-06 02:51:44] [Rank 0] step:7041/10000 train_time:296777ms step_avg:42.15ms
+[2025-09-06 02:51:45] [Rank 0] step:7061/10000 train_time:297516ms step_avg:42.14ms
+[2025-09-06 02:51:46] [Rank 0] step:7081/10000 train_time:298257ms step_avg:42.12ms
+[2025-09-06 02:51:47] [Rank 0] step:7101/10000 train_time:299112ms step_avg:42.12ms
+[2025-09-06 02:51:47] [Rank 0] step:7121/10000 train_time:299851ms step_avg:42.11ms
+[2025-09-06 02:51:48] [Rank 0] step:7141/10000 train_time:300590ms step_avg:42.09ms
+[2025-09-06 02:51:49] [Rank 0] step:7161/10000 train_time:301330ms step_avg:42.08ms
+[2025-09-06 02:51:50] [Rank 0] step:7181/10000 train_time:302068ms step_avg:42.06ms
+[2025-09-06 02:51:50] [Rank 0] step:7201/10000 train_time:302808ms step_avg:42.05ms
+[2025-09-06 02:51:51] [Rank 0] step:7221/10000 train_time:303547ms step_avg:42.04ms
+[2025-09-06 02:51:52] [Rank 0] step:7241/10000 train_time:304285ms step_avg:42.02ms
+[2025-09-06 02:51:53] [Rank 0] step:7261/10000 train_time:305024ms step_avg:42.01ms
+[2025-09-06 02:51:53] [Rank 0] step:7281/10000 train_time:305763ms step_avg:41.99ms
+[2025-09-06 02:51:54] [Rank 0] step:7301/10000 train_time:306502ms step_avg:41.98ms
+[2025-09-06 02:51:55] [Rank 0] step:7321/10000 train_time:307242ms step_avg:41.97ms
+[2025-09-06 02:51:56] [Rank 0] step:7341/10000 train_time:307981ms step_avg:41.95ms
+[2025-09-06 02:51:56] [Rank 0] step:7361/10000 train_time:308720ms step_avg:41.94ms
+[2025-09-06 02:51:57] [Rank 0] step:7381/10000 train_time:309459ms step_avg:41.93ms
+[2025-09-06 02:51:58] [Rank 0] step:7401/10000 train_time:310197ms step_avg:41.91ms
+[2025-09-06 02:51:59] [Rank 0] step:7421/10000 train_time:310937ms step_avg:41.90ms
+[2025-09-06 02:51:59] [Rank 0] step:7441/10000 train_time:311676ms step_avg:41.89ms
+[2025-09-06 02:52:00] [Rank 0] step:7461/10000 train_time:312415ms step_avg:41.87ms
+[2025-09-06 02:52:01] [Rank 0] step:7481/10000 train_time:313153ms step_avg:41.86ms
+[2025-09-06 02:52:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
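For reference, the recurring val_tokens warning above is plain integer arithmetic: 491520 / 65536 = 7.5, so no integer number of validation batches covers the requested token budget exactly, and the trailing half batch (32768 tokens) is presumably skipped. A minimal sketch of such a check, with assumed variable names (the training script's actual code is not shown in this diff):

val_tokens = 491520      # requested validation token budget (from the warning text)
val_batch_size = 65536   # tokens consumed per validation batch (from the warning text)

num_val_batches = val_tokens // val_batch_size  # 7 full batches
if val_tokens % val_batch_size != 0:
    # 491520 / 65536 = 7.5, so 32768 tokens would never be evaluated
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible by "
          f"val_batch_size ({val_batch_size}). Some tokens might be missed.")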
+[2025-09-06 02:52:02] [Rank 0] PRINT: step:7500/10000 train_loss:2.6015 val_loss:2.5823 train_time:313973ms step_avg:41.86ms
+[2025-09-06 02:52:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:52:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:53:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:53:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:53:23] [Rank 0] Total Loss: 4.8813
+[2025-09-06 02:53:23] [Rank 0] Total FTA (Unweighted): 0.2162
+[2025-09-06 02:53:23] [Rank 0] Total FTA (Weighted): 0.2162
+[2025-09-06 02:53:23] [Rank 0] Group 0 Loss: 3.1992
+[2025-09-06 02:53:23] [Rank 0] Group 1 Loss: 3.1364
+[2025-09-06 02:53:23] [Rank 0] Group 2 Loss: 3.3423
+[2025-09-06 02:53:23] [Rank 0] Group 3 Loss: 3.7578
+[2025-09-06 02:53:23] [Rank 0] Group 4 Loss: 4.3673
+[2025-09-06 02:53:23] [Rank 0] Group 5 Loss: 4.8371
+[2025-09-06 02:53:23] [Rank 0] Group 6 Loss: 5.1401
+[2025-09-06 02:53:23] [Rank 0] Group 7 Loss: 5.2573
+[2025-09-06 02:53:23] [Rank 0] Group 8 Loss: 5.5436
+[2025-09-06 02:53:23] [Rank 0] Group 9 Loss: 5.6767
+[2025-09-06 02:53:23] [Rank 0] Group 10 Loss: 5.6730
+[2025-09-06 02:53:23] [Rank 0] Group 11 Loss: 5.7043
+[2025-09-06 02:53:23] [Rank 0] Group 12 Loss: 5.5925
+[2025-09-06 02:53:23] [Rank 0] Group 13 Loss: 5.6160
+[2025-09-06 02:53:23] [Rank 0] Group 14 Loss: 5.6525
+[2025-09-06 02:53:23] [Rank 0] Group 15 Loss: 5.6041
+[2025-09-06 02:53:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:53:23] [Rank 0] Group 1 FTA: 0.6500
+[2025-09-06 02:53:23] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:53:23] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:53:23] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 02:53:23] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 02:53:23] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 02:53:24] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:53:24] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 02:53:24] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 02:53:24] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 02:53:24] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 02:53:24] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 02:53:24] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 02:53:24] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 02:53:24] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 02:53:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 02:53:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 02:53:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 02:53:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 02:53:25] [Rank 0] step:7501/10000 train_time:313983ms step_avg:41.86ms
+[2025-09-06 02:53:26] [Rank 0] step:7521/10000 train_time:314656ms step_avg:41.84ms
+[2025-09-06 02:53:26] [Rank 0] step:7541/10000 train_time:315395ms step_avg:41.82ms
+[2025-09-06 02:53:27] [Rank 0] step:7561/10000 train_time:316134ms step_avg:41.81ms
+[2025-09-06 02:53:28] [Rank 0] step:7581/10000 train_time:316873ms step_avg:41.80ms
+[2025-09-06 02:53:29] [Rank 0] step:7601/10000 train_time:317615ms step_avg:41.79ms
+[2025-09-06 02:53:29] [Rank 0] step:7621/10000 train_time:318354ms step_avg:41.77ms
+[2025-09-06 02:53:31] [Rank 0] step:7641/10000 train_time:319114ms step_avg:41.76ms
+[2025-09-06 02:53:31] [Rank 0] step:7661/10000 train_time:320456ms step_avg:41.83ms
+[2025-09-06 02:53:32] [Rank 0] step:7681/10000 train_time:321195ms step_avg:41.82ms
+[2025-09-06 02:53:33] [Rank 0] step:7701/10000 train_time:321934ms step_avg:41.80ms
+[2025-09-06 02:53:34] [Rank 0] step:7721/10000 train_time:322673ms step_avg:41.79ms
+[2025-09-06 02:53:34] [Rank 0] step:7741/10000 train_time:323412ms step_avg:41.78ms
+[2025-09-06 02:53:35] [Rank 0] step:7761/10000 train_time:324153ms step_avg:41.77ms
+[2025-09-06 02:53:36] [Rank 0] step:7781/10000 train_time:324892ms step_avg:41.75ms
+[2025-09-06 02:53:37] [Rank 0] step:7801/10000 train_time:325633ms step_avg:41.74ms
+[2025-09-06 02:53:37] [Rank 0] step:7821/10000 train_time:326373ms step_avg:41.73ms
+[2025-09-06 02:53:38] [Rank 0] step:7841/10000 train_time:327112ms step_avg:41.72ms
+[2025-09-06 02:53:39] [Rank 0] step:7861/10000 train_time:327851ms step_avg:41.71ms
+[2025-09-06 02:53:40] [Rank 0] step:7881/10000 train_time:328592ms step_avg:41.69ms
+[2025-09-06 02:53:40] [Rank 0] step:7901/10000 train_time:329331ms step_avg:41.68ms
+[2025-09-06 02:53:41] [Rank 0] step:7921/10000 train_time:330071ms step_avg:41.67ms
+[2025-09-06 02:53:42] [Rank 0] step:7941/10000 train_time:330810ms step_avg:41.66ms
+[2025-09-06 02:53:43] [Rank 0] step:7961/10000 train_time:331550ms step_avg:41.65ms
+[2025-09-06 02:53:43] [Rank 0] step:7981/10000 train_time:332289ms step_avg:41.63ms
+[2025-09-06 02:53:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
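The step_avg field in the step lines above is simply cumulative wall-clock training time divided by the step index, which is why it creeps downward as fixed startup cost is amortized over more steps. A sketch with a hypothetical helper (not taken from the script) that reproduces the logged values:

def step_avg_ms(train_time_ms: int, step: int) -> float:
    # Cumulative training time divided by the number of steps completed so far.
    return train_time_ms / step

# Reproduces the logged value for step 7001: 295211 / 7001 = 42.17
print(f"step_avg:{step_avg_ms(295211, 7001):.2f}ms")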
+[2025-09-06 02:53:45] [Rank 0] PRINT: step:8000/10000 train_loss:2.5813 val_loss:2.5634 train_time:333111ms step_avg:41.64ms
+[2025-09-06 02:53:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:53:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:55:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:55:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:55:06] [Rank 0] Total Loss: 4.8571
+[2025-09-06 02:55:06] [Rank 0] Total FTA (Unweighted): 0.2231
+[2025-09-06 02:55:06] [Rank 0] Total FTA (Weighted): 0.2231
+[2025-09-06 02:55:06] [Rank 0] Group 0 Loss: 3.1864
+[2025-09-06 02:55:06] [Rank 0] Group 1 Loss: 3.1764
+[2025-09-06 02:55:06] [Rank 0] Group 2 Loss: 3.2475
+[2025-09-06 02:55:06] [Rank 0] Group 3 Loss: 3.7649
+[2025-09-06 02:55:06] [Rank 0] Group 4 Loss: 4.3589
+[2025-09-06 02:55:06] [Rank 0] Group 5 Loss: 4.8001
+[2025-09-06 02:55:06] [Rank 0] Group 6 Loss: 5.0898
+[2025-09-06 02:55:06] [Rank 0] Group 7 Loss: 5.2313
+[2025-09-06 02:55:06] [Rank 0] Group 8 Loss: 5.5138
+[2025-09-06 02:55:06] [Rank 0] Group 9 Loss: 5.6519
+[2025-09-06 02:55:06] [Rank 0] Group 10 Loss: 5.6472
+[2025-09-06 02:55:06] [Rank 0] Group 11 Loss: 5.6727
+[2025-09-06 02:55:06] [Rank 0] Group 12 Loss: 5.5645
+[2025-09-06 02:55:06] [Rank 0] Group 13 Loss: 5.5882
+[2025-09-06 02:55:06] [Rank 0] Group 14 Loss: 5.6368
+[2025-09-06 02:55:06] [Rank 0] Group 15 Loss: 5.5839
+[2025-09-06 02:55:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:55:06] [Rank 0] Group 1 FTA: 0.6500
+[2025-09-06 02:55:06] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:55:06] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:55:06] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 02:55:06] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 02:55:06] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 02:55:06] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:55:06] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 02:55:06] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 02:55:06] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 02:55:06] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 02:55:06] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 02:55:06] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 02:55:06] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 02:55:06] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 02:55:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 02:55:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 02:55:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 02:55:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 02:55:08] [Rank 0] step:8001/10000 train_time:333120ms step_avg:41.63ms
+[2025-09-06 02:55:09] [Rank 0] step:8021/10000 train_time:334413ms step_avg:41.69ms
+[2025-09-06 02:55:10] [Rank 0] step:8041/10000 train_time:335156ms step_avg:41.68ms
+[2025-09-06 02:55:10] [Rank 0] step:8061/10000 train_time:335894ms step_avg:41.67ms
+[2025-09-06 02:55:11] [Rank 0] step:8081/10000 train_time:336633ms step_avg:41.66ms
+[2025-09-06 02:55:12] [Rank 0] step:8101/10000 train_time:337371ms step_avg:41.65ms
+[2025-09-06 02:55:13] [Rank 0] step:8121/10000 train_time:338110ms step_avg:41.63ms
+[2025-09-06 02:55:13] [Rank 0] step:8141/10000 train_time:338850ms step_avg:41.62ms
+[2025-09-06 02:55:14] [Rank 0] step:8161/10000 train_time:339590ms step_avg:41.61ms
+[2025-09-06 02:55:15] [Rank 0] step:8181/10000 train_time:340329ms step_avg:41.60ms
+[2025-09-06 02:55:16] [Rank 0] step:8201/10000 train_time:341069ms step_avg:41.59ms
+[2025-09-06 02:55:16] [Rank 0] step:8221/10000 train_time:341809ms step_avg:41.58ms
+[2025-09-06 02:55:17] [Rank 0] step:8241/10000 train_time:342547ms step_avg:41.57ms
+[2025-09-06 02:55:18] [Rank 0] step:8261/10000 train_time:343286ms step_avg:41.56ms
+[2025-09-06 02:55:19] [Rank 0] step:8281/10000 train_time:344025ms step_avg:41.54ms
+[2025-09-06 02:55:19] [Rank 0] step:8301/10000 train_time:344764ms step_avg:41.53ms
+[2025-09-06 02:55:20] [Rank 0] step:8321/10000 train_time:345503ms step_avg:41.52ms
+[2025-09-06 02:55:21] [Rank 0] step:8341/10000 train_time:346242ms step_avg:41.51ms
+[2025-09-06 02:55:21] [Rank 0] step:8361/10000 train_time:346980ms step_avg:41.50ms
+[2025-09-06 02:55:22] [Rank 0] step:8381/10000 train_time:347720ms step_avg:41.49ms
+[2025-09-06 02:55:23] [Rank 0] step:8401/10000 train_time:348459ms step_avg:41.48ms
+[2025-09-06 02:55:24] [Rank 0] step:8421/10000 train_time:349199ms step_avg:41.47ms
+[2025-09-06 02:55:24] [Rank 0] step:8441/10000 train_time:349937ms step_avg:41.46ms
+[2025-09-06 02:55:25] [Rank 0] step:8461/10000 train_time:350676ms step_avg:41.45ms
+[2025-09-06 02:55:26] [Rank 0] step:8481/10000 train_time:351417ms step_avg:41.44ms
+[2025-09-06 02:55:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
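In the evaluation blocks, "Total FTA (Unweighted)" is the plain mean of the 16 per-group FTA values, and the weighted variant weights each group by its sample count; the two coincide throughout this log because the fixed eval set is balanced at 100 samples per group (1600 samples over 16 groups). A check against the step-8000 numbers above:

group_fta = [1.00, 0.65, 0.18, 0.17, 0.13, 0.18, 0.13, 0.10,
             0.20, 0.12, 0.12, 0.11, 0.11, 0.15, 0.12, 0.10]
counts = [100] * 16  # 1600 fixed-eval samples split evenly over 16 groups (assumed balanced)

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * c for f, c in zip(group_fta, counts)) / sum(counts)
print(f"{unweighted:.4f} {weighted:.4f}")  # 0.2231 0.2231, matching the log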
+[2025-09-06 02:55:27] [Rank 0] PRINT: step:8500/10000 train_loss:2.5642 val_loss:2.5470 train_time:352237ms step_avg:41.44ms
+[2025-09-06 02:55:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:55:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:56:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:56:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:56:49] [Rank 0] Total Loss: 4.8986
+[2025-09-06 02:56:49] [Rank 0] Total FTA (Unweighted): 0.2250
+[2025-09-06 02:56:49] [Rank 0] Total FTA (Weighted): 0.2250
+[2025-09-06 02:56:49] [Rank 0] Group 0 Loss: 3.3082
+[2025-09-06 02:56:49] [Rank 0] Group 1 Loss: 3.1573
+[2025-09-06 02:56:49] [Rank 0] Group 2 Loss: 3.3388
+[2025-09-06 02:56:49] [Rank 0] Group 3 Loss: 3.8136
+[2025-09-06 02:56:49] [Rank 0] Group 4 Loss: 4.3587
+[2025-09-06 02:56:49] [Rank 0] Group 5 Loss: 4.8206
+[2025-09-06 02:56:49] [Rank 0] Group 6 Loss: 5.1739
+[2025-09-06 02:56:49] [Rank 0] Group 7 Loss: 5.2697
+[2025-09-06 02:56:49] [Rank 0] Group 8 Loss: 5.5402
+[2025-09-06 02:56:49] [Rank 0] Group 9 Loss: 5.6688
+[2025-09-06 02:56:49] [Rank 0] Group 10 Loss: 5.6663
+[2025-09-06 02:56:49] [Rank 0] Group 11 Loss: 5.7212
+[2025-09-06 02:56:49] [Rank 0] Group 12 Loss: 5.6149
+[2025-09-06 02:56:49] [Rank 0] Group 13 Loss: 5.6242
+[2025-09-06 02:56:49] [Rank 0] Group 14 Loss: 5.6761
+[2025-09-06 02:56:49] [Rank 0] Group 15 Loss: 5.6251
+[2025-09-06 02:56:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:56:49] [Rank 0] Group 1 FTA: 0.6500
+[2025-09-06 02:56:49] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:56:49] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:56:49] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 02:56:49] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 02:56:49] [Rank 0] Group 6 FTA: 0.1200
+[2025-09-06 02:56:49] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:56:49] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 02:56:49] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 02:56:49] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 02:56:49] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 02:56:49] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 02:56:49] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 02:56:49] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 02:56:49] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 02:56:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 02:56:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 02:56:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 02:56:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 02:56:51] [Rank 0] step:8501/10000 train_time:352246ms step_avg:41.44ms
+[2025-09-06 02:56:52] [Rank 0] step:8521/10000 train_time:352917ms step_avg:41.42ms
+[2025-09-06 02:56:52] [Rank 0] step:8541/10000 train_time:353656ms step_avg:41.41ms
+[2025-09-06 02:56:53] [Rank 0] step:8561/10000 train_time:354395ms step_avg:41.40ms
+[2025-09-06 02:56:54] [Rank 0] step:8581/10000 train_time:355134ms step_avg:41.39ms
+[2025-09-06 02:56:55] [Rank 0] step:8601/10000 train_time:355874ms step_avg:41.38ms
+[2025-09-06 02:56:55] [Rank 0] step:8621/10000 train_time:356613ms step_avg:41.37ms
+[2025-09-06 02:56:56] [Rank 0] step:8641/10000 train_time:357352ms step_avg:41.36ms
+[2025-09-06 02:56:57] [Rank 0] step:8661/10000 train_time:358092ms step_avg:41.35ms
+[2025-09-06 02:56:57] [Rank 0] step:8681/10000 train_time:358831ms step_avg:41.34ms
+[2025-09-06 02:56:58] [Rank 0] step:8701/10000 train_time:359570ms step_avg:41.33ms
+[2025-09-06 02:56:59] [Rank 0] step:8721/10000 train_time:360309ms step_avg:41.32ms
+[2025-09-06 02:57:00] [Rank 0] step:8741/10000 train_time:361048ms step_avg:41.31ms
+[2025-09-06 02:57:01] [Rank 0] step:8761/10000 train_time:361963ms step_avg:41.32ms
+[2025-09-06 02:57:01] [Rank 0] step:8781/10000 train_time:362702ms step_avg:41.31ms
+[2025-09-06 02:57:02] [Rank 0] step:8801/10000 train_time:363442ms step_avg:41.30ms
+[2025-09-06 02:57:03] [Rank 0] step:8821/10000 train_time:364290ms step_avg:41.30ms
+[2025-09-06 02:57:04] [Rank 0] step:8841/10000 train_time:365633ms step_avg:41.36ms
+[2025-09-06 02:57:05] [Rank 0] step:8861/10000 train_time:366372ms step_avg:41.35ms
+[2025-09-06 02:57:06] [Rank 0] step:8881/10000 train_time:367112ms step_avg:41.34ms
+[2025-09-06 02:57:06] [Rank 0] step:8901/10000 train_time:367852ms step_avg:41.33ms
+[2025-09-06 02:57:07] [Rank 0] step:8921/10000 train_time:368592ms step_avg:41.32ms
+[2025-09-06 02:57:08] [Rank 0] step:8941/10000 train_time:369330ms step_avg:41.31ms
+[2025-09-06 02:57:09] [Rank 0] step:8961/10000 train_time:370070ms step_avg:41.30ms
+[2025-09-06 02:57:09] [Rank 0] step:8981/10000 train_time:370809ms step_avg:41.29ms
+[2025-09-06 02:57:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
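Each evaluation pass rewrites the same four PNG files in the run directory rather than creating new ones, so the saved curves always show the full history up to the current step. A minimal sketch of that redraw-and-overwrite pattern (matplotlib, with assumed function and variable names; the script's actual plotting code may differ):

import matplotlib.pyplot as plt

def update_total_loss_curve(steps, losses, out_path):
    # Redraw the whole history each time and overwrite the PNG in place.
    fig, ax = plt.subplots()
    ax.plot(steps, losses, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval total loss")
    fig.savefig(out_path)
    plt.close(fig)

# e.g. for the evals logged so far:
# update_total_loss_curve([7000, 7500, 8000, 8500],
#                         [4.8915, 4.8813, 4.8571, 4.8986],
#                         "logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png")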
+[2025-09-06 02:57:11] [Rank 0] PRINT: step:9000/10000 train_loss:2.5479 val_loss:2.5329 train_time:371628ms step_avg:41.29ms
+[2025-09-06 02:57:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:57:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:58:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:58:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:58:32] [Rank 0] Total Loss: 4.8838
+[2025-09-06 02:58:32] [Rank 0] Total FTA (Unweighted): 0.2306
+[2025-09-06 02:58:32] [Rank 0] Total FTA (Weighted): 0.2306
+[2025-09-06 02:58:32] [Rank 0] Group 0 Loss: 3.2532
+[2025-09-06 02:58:32] [Rank 0] Group 1 Loss: 3.1564
+[2025-09-06 02:58:32] [Rank 0] Group 2 Loss: 3.3375
+[2025-09-06 02:58:32] [Rank 0] Group 3 Loss: 3.7767
+[2025-09-06 02:58:32] [Rank 0] Group 4 Loss: 4.3484
+[2025-09-06 02:58:32] [Rank 0] Group 5 Loss: 4.8090
+[2025-09-06 02:58:32] [Rank 0] Group 6 Loss: 5.1392
+[2025-09-06 02:58:32] [Rank 0] Group 7 Loss: 5.2525
+[2025-09-06 02:58:32] [Rank 0] Group 8 Loss: 5.5297
+[2025-09-06 02:58:32] [Rank 0] Group 9 Loss: 5.6622
+[2025-09-06 02:58:32] [Rank 0] Group 10 Loss: 5.6819
+[2025-09-06 02:58:32] [Rank 0] Group 11 Loss: 5.7030
+[2025-09-06 02:58:32] [Rank 0] Group 12 Loss: 5.6039
+[2025-09-06 02:58:32] [Rank 0] Group 13 Loss: 5.6133
+[2025-09-06 02:58:32] [Rank 0] Group 14 Loss: 5.6602
+[2025-09-06 02:58:32] [Rank 0] Group 15 Loss: 5.6139
+[2025-09-06 02:58:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:58:32] [Rank 0] Group 1 FTA: 0.6500
+[2025-09-06 02:58:32] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:58:32] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:58:32] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 02:58:32] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 02:58:32] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 02:58:32] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:58:32] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 02:58:32] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 02:58:32] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 02:58:32] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 02:58:32] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 02:58:32] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 02:58:32] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 02:58:32] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 02:58:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 02:58:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 02:58:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 02:58:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 02:58:34] [Rank 0] step:9001/10000 train_time:371637ms step_avg:41.29ms
+[2025-09-06 02:58:34] [Rank 0] step:9021/10000 train_time:372303ms step_avg:41.27ms
+[2025-09-06 02:58:35] [Rank 0] step:9041/10000 train_time:373042ms step_avg:41.26ms
+[2025-09-06 02:58:36] [Rank 0] step:9061/10000 train_time:373781ms step_avg:41.25ms
+[2025-09-06 02:58:37] [Rank 0] step:9081/10000 train_time:374520ms step_avg:41.24ms
+[2025-09-06 02:58:37] [Rank 0] step:9101/10000 train_time:375260ms step_avg:41.23ms
+[2025-09-06 02:58:38] [Rank 0] step:9121/10000 train_time:376000ms step_avg:41.22ms
+[2025-09-06 02:58:39] [Rank 0] step:9141/10000 train_time:376740ms step_avg:41.21ms
+[2025-09-06 02:58:39] [Rank 0] step:9161/10000 train_time:377480ms step_avg:41.21ms
+[2025-09-06 02:58:40] [Rank 0] step:9181/10000 train_time:378220ms step_avg:41.20ms
+[2025-09-06 02:58:41] [Rank 0] step:9201/10000 train_time:378958ms step_avg:41.19ms
+[2025-09-06 02:58:42] [Rank 0] step:9221/10000 train_time:379697ms step_avg:41.18ms
+[2025-09-06 02:58:42] [Rank 0] step:9241/10000 train_time:380436ms step_avg:41.17ms
+[2025-09-06 02:58:43] [Rank 0] step:9261/10000 train_time:381175ms step_avg:41.16ms
+[2025-09-06 02:58:44] [Rank 0] step:9281/10000 train_time:381914ms step_avg:41.15ms
+[2025-09-06 02:58:45] [Rank 0] step:9301/10000 train_time:382653ms step_avg:41.14ms
+[2025-09-06 02:58:45] [Rank 0] step:9321/10000 train_time:383393ms step_avg:41.13ms
+[2025-09-06 02:58:46] [Rank 0] step:9341/10000 train_time:384133ms step_avg:41.12ms
+[2025-09-06 02:58:47] [Rank 0] step:9361/10000 train_time:384872ms step_avg:41.11ms
+[2025-09-06 02:58:48] [Rank 0] step:9381/10000 train_time:385611ms step_avg:41.11ms
+[2025-09-06 02:58:48] [Rank 0] step:9401/10000 train_time:386350ms step_avg:41.10ms
+[2025-09-06 02:58:49] [Rank 0] step:9421/10000 train_time:387090ms step_avg:41.09ms
+[2025-09-06 02:58:50] [Rank 0] step:9441/10000 train_time:387828ms step_avg:41.08ms
+[2025-09-06 02:58:51] [Rank 0] step:9461/10000 train_time:388568ms step_avg:41.07ms
+[2025-09-06 02:58:51] [Rank 0] step:9481/10000 train_time:389306ms step_avg:41.06ms
+[2025-09-06 02:58:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
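The log never defines FTA; given that groups 0 and 1 saturate at exactly 1.0000, a plausible reading is first-token accuracy: whether the model's top-1 prediction at the answer position matches the gold answer's first token. The sketch below implements only that assumed definition, not the script's confirmed metric:

import torch

def first_token_accuracy(logits: torch.Tensor, first_tok: torch.Tensor,
                         ans_pos: torch.Tensor) -> float:
    """logits: [B, T, V]; first_tok: [B] gold first answer tokens;
    ans_pos: [B] index of each answer's first token in the sequence."""
    # A causal LM at position p predicts token p+1, so read logits at ans_pos - 1.
    batch = torch.arange(logits.size(0))
    pred = logits[batch, ans_pos - 1].argmax(dim=-1)
    return (pred == first_tok).float().mean().item()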
+[2025-09-06 02:58:52] [Rank 0] PRINT: step:9500/10000 train_loss:2.5343 val_loss:2.5207 train_time:390127ms step_avg:41.07ms
+[2025-09-06 02:58:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:58:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:00:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:00:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:00:14] [Rank 0] Total Loss: 4.8593
+[2025-09-06 03:00:14] [Rank 0] Total FTA (Unweighted): 0.2537
+[2025-09-06 03:00:14] [Rank 0] Total FTA (Weighted): 0.2537
+[2025-09-06 03:00:14] [Rank 0] Group 0 Loss: 3.2425
+[2025-09-06 03:00:14] [Rank 0] Group 1 Loss: 3.1600
+[2025-09-06 03:00:14] [Rank 0] Group 2 Loss: 3.2920
+[2025-09-06 03:00:14] [Rank 0] Group 3 Loss: 3.7646
+[2025-09-06 03:00:14] [Rank 0] Group 4 Loss: 4.3083
+[2025-09-06 03:00:14] [Rank 0] Group 5 Loss: 4.7951
+[2025-09-06 03:00:14] [Rank 0] Group 6 Loss: 5.1063
+[2025-09-06 03:00:14] [Rank 0] Group 7 Loss: 5.2324
+[2025-09-06 03:00:14] [Rank 0] Group 8 Loss: 5.5084
+[2025-09-06 03:00:14] [Rank 0] Group 9 Loss: 5.6424
+[2025-09-06 03:00:14] [Rank 0] Group 10 Loss: 5.6548
+[2025-09-06 03:00:14] [Rank 0] Group 11 Loss: 5.6683
+[2025-09-06 03:00:14] [Rank 0] Group 12 Loss: 5.5667
+[2025-09-06 03:00:14] [Rank 0] Group 13 Loss: 5.5892
+[2025-09-06 03:00:14] [Rank 0] Group 14 Loss: 5.6327
+[2025-09-06 03:00:14] [Rank 0] Group 15 Loss: 5.5855
+[2025-09-06 03:00:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:00:14] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 03:00:14] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:00:14] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:00:14] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 03:00:14] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 03:00:14] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 03:00:14] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:00:14] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 03:00:14] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:00:14] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-06 03:00:14] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:00:14] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 03:00:14] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 03:00:14] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 03:00:14] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 03:00:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 03:00:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 03:00:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 03:00:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 03:00:16] [Rank 0] step:9501/10000 train_time:390136ms step_avg:41.06ms
+[2025-09-06 03:00:16] [Rank 0] step:9521/10000 train_time:390823ms step_avg:41.05ms
+[2025-09-06 03:00:17] [Rank 0] step:9541/10000 train_time:391562ms step_avg:41.04ms
+[2025-09-06 03:00:18] [Rank 0] step:9561/10000 train_time:392301ms step_avg:41.03ms
+[2025-09-06 03:00:18] [Rank 0] step:9581/10000 train_time:393040ms step_avg:41.02ms
+[2025-09-06 03:00:19] [Rank 0] step:9601/10000 train_time:393779ms step_avg:41.01ms
+[2025-09-06 03:00:20] [Rank 0] step:9621/10000 train_time:394518ms step_avg:41.01ms
+[2025-09-06 03:00:21] [Rank 0] step:9641/10000 train_time:395257ms step_avg:41.00ms
+[2025-09-06 03:00:22] [Rank 0] step:9661/10000 train_time:396272ms step_avg:41.02ms
+[2025-09-06 03:00:22] [Rank 0] step:9681/10000 train_time:397010ms step_avg:41.01ms
+[2025-09-06 03:00:23] [Rank 0] step:9701/10000 train_time:397750ms step_avg:41.00ms
+[2025-09-06 03:00:24] [Rank 0] step:9721/10000 train_time:398490ms step_avg:40.99ms
+[2025-09-06 03:00:25] [Rank 0] step:9741/10000 train_time:399229ms step_avg:40.98ms
+[2025-09-06 03:00:25] [Rank 0] step:9761/10000 train_time:399969ms step_avg:40.98ms
+[2025-09-06 03:00:26] [Rank 0] step:9781/10000 train_time:400709ms step_avg:40.97ms
+[2025-09-06 03:00:27] [Rank 0] step:9801/10000 train_time:401448ms step_avg:40.96ms
+[2025-09-06 03:00:28] [Rank 0] step:9821/10000 train_time:402188ms step_avg:40.95ms
+[2025-09-06 03:00:28] [Rank 0] step:9841/10000 train_time:402927ms step_avg:40.94ms
+[2025-09-06 03:00:29] [Rank 0] step:9861/10000 train_time:403666ms step_avg:40.94ms
+[2025-09-06 03:00:30] [Rank 0] step:9881/10000 train_time:404406ms step_avg:40.93ms
+[2025-09-06 03:00:31] [Rank 0] step:9901/10000 train_time:405145ms step_avg:40.92ms
+[2025-09-06 03:00:31] [Rank 0] step:9921/10000 train_time:405884ms step_avg:40.91ms
+[2025-09-06 03:00:32] [Rank 0] step:9941/10000 train_time:406622ms step_avg:40.90ms
+[2025-09-06 03:00:33] [Rank 0] step:9961/10000 train_time:407361ms step_avg:40.90ms
+[2025-09-06 03:00:34] [Rank 0] step:9981/10000 train_time:408100ms step_avg:40.89ms
+[2025-09-06 03:00:34] [Rank 0] step:10000/10000 train_time:408803ms step_avg:40.88ms
+[2025-09-06 03:00:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:00:35] [Rank 0] PRINT: step:10000/10000 train_loss:2.5236 val_loss:2.5113 train_time:408928ms step_avg:40.89ms
+[2025-09-06 03:00:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:00:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:01:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:01:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:01:56] [Rank 0] Total Loss: 4.8662
+[2025-09-06 03:01:56] [Rank 0] Total FTA (Unweighted): 0.2556
+[2025-09-06 03:01:56] [Rank 0] Total FTA (Weighted): 0.2556
+[2025-09-06 03:01:56] [Rank 0] Group 0 Loss: 3.2603
+[2025-09-06 03:01:56] [Rank 0] Group 1 Loss: 3.1402
+[2025-09-06 03:01:56] [Rank 0] Group 2 Loss: 3.3250
+[2025-09-06 03:01:56] [Rank 0] Group 3 Loss: 3.7766
+[2025-09-06 03:01:56] [Rank 0] Group 4 Loss: 4.3079
+[2025-09-06 03:01:56] [Rank 0] Group 5 Loss: 4.7926
+[2025-09-06 03:01:56] [Rank 0] Group 6 Loss: 5.1148
+[2025-09-06 03:01:56] [Rank 0] Group 7 Loss: 5.2385
+[2025-09-06 03:01:56] [Rank 0] Group 8 Loss: 5.5118
+[2025-09-06 03:01:56] [Rank 0] Group 9 Loss: 5.6421
+[2025-09-06 03:01:56] [Rank 0] Group 10 Loss: 5.6665
+[2025-09-06 03:01:56] [Rank 0] Group 11 Loss: 5.6777
+[2025-09-06 03:01:56] [Rank 0] Group 12 Loss: 5.5796
+[2025-09-06 03:01:56] [Rank 0] Group 13 Loss: 5.5908
+[2025-09-06 03:01:56] [Rank 0] Group 14 Loss: 5.6424
+[2025-09-06 03:01:56] [Rank 0] Group 15 Loss: 5.5924
+[2025-09-06 03:01:56] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:01:56] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 03:01:56] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:01:56] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:01:56] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 03:01:56] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 03:01:56] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 03:01:56] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:01:56] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 03:01:56] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:01:56] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-06 03:01:56] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-06 03:01:56] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 03:01:56] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 03:01:56] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:01:56] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 03:01:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_loss_curves.png
+[2025-09-06 03:01:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/per_class_acc_curves.png
+[2025-09-06 03:01:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_loss_curve.png
+[2025-09-06 03:01:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_42/total_acc_curve.png
+[2025-09-06 03:01:58] [Rank 0] step:10001/10000 train_time:408936ms step_avg:40.89ms
+[2025-09-06 03:01:58] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 03:01:58 2025 ---
+[2025-09-06 03:01:58] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..4c13fe26720094a9668f0206836ace647956af26
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/config.json
@@ -0,0 +1,29 @@
+{
+  "cli_args": {
+    "unet": false,
+ "seed": 43, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.05, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "8af53508-0464-4794-9685-8e36286cec5a", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 
894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 
425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 
617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 
898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..845e68b978c9add4f9b7a6548df77193c52dda83 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5730fc6c92da24de803709383ea35ddf0cfc69b4cb8119c2ad629daec1af5c5b +size 268217 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..21c4f78ef762fcb7009f448b0fd9ea06b9d464ad --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94f185df5b25331eace283c7844bd838f4fb951a460c237f1ce3f102863d0825 +size 409313 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..35d9feae184c6a7e7f7e514125b5d3dd0e7bfdba --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c3cec21e62eb46545e2ef5f3ed2055701ee89bdca40540f98466c59d6736d59 +size 87612 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..821291225edced94e49656f1baaaa3c1dc391086 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38d63359a11c599103a65114694897a35848c5943616a07cefdfb9ce211645eb +size 101744 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/training_log_8af53508-0464-4794-9685-8e36286cec5a.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/training_log_8af53508-0464-4794-9685-8e36286cec5a.txt new file mode 100644 index 0000000000000000000000000000000000000000..7b889b6245d9d52e39e7e1443732e4c0db08b934 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/training_log_8af53508-0464-4794-9685-8e36286cec5a.txt @@ -0,0 +1,5614 @@ +[2025-09-06 03:02:24] [Rank 0] PRINT: --- Script Start: Sat Sep 6 03:02:24 2025 --- +[2025-09-06 03:02:24]
[Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.05, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-06 03:02:24] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-06 03:02:24] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-06 03:02:24] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43 +[2025-09-06 03:02:24] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb",
buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # itertools.cycle loops over the shards indefinitely, which is what enables multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn); " + "5: All Adam (No Muon, all applicable matrices to Adam); " + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); " + "7: Muon(VO Attn, MLP)/Adam(QK Attn); " + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
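+ "9: SGD+Momentum on ALL parameters (uses --sgd_lr, no Muon/Adam); " + "10: Muon(O Attn, MLP)/Adam(QKV Attn); " + "13: Muon(O Attn, W_2 MLP)/Adam(QKV Attn, W_1 MLP); " + "14: Muon(O Attn)/Adam(QKV Attn, MLP); " + "15: Muon(V Attn)/Adam(QK Attn, O Attn, MLP); " + "16: Muon(QKV Attn)/Adam(O Attn, MLP)." # modes 9-16 documented to match the dispatch below; 11 and 12 are not defined in this script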
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") # write each message exactly once; a second unconditional write here previously duplicated every log line + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP, Adam on the rest + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O.
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # optionally add weight_decay=0.01 to Adam here + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your GatedSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >= 2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr # LR for Muon; needed here too since the qkvo branch above defines its own + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if needed + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None
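+ + # Minimal shape sketch (illustrative addition): in the gated parameterization W_1 covers two matrices per block (c_fc and c_up) while W_2 covers one (c_proj), so the groups collected above should satisfy: + assert len(mlp_w1_group) == 2 * len(mlp_w2_group), "gated MLP grouping: expected c_fc + c_up per c_proj" + + print0(f"PRINT: Optimizers configured. 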
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filter for the fixed eval set: keep only texts that parse into question + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Bucket line indices by group_id, collecting only samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # QA_JSONL_PATH and M_FOR_POWERLAW were set from the CLI arguments above + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f)
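+ # The saved JSON maps group-id strings to line indices into the QA jsonl, e.g. {"0": [...], "1": [...]}, with at most PER_GROUP_K indices per group, so every later evaluation scores exactly the same samples. + print0(f"PRINT: Built fixed eval set. 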
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-06 03:02:24] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # cycling through the shards enables multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
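+# Example launch (illustrative only; the script filename is hypothetical, the flags mirror the cli_args recorded in this run's config.json): +# torchrun --nproc_per_node=1 train_qa.py --optimizer_mode 5 --model_parameterization gated --adam_lr 0.0005 --muon_lr 0.01 --seed 42 --base_dir logs_qa_adam_gated/lr_search_long +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 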
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n")
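+# Note: print0 logs only from the master process; messages prefixed with "PRINT:" are echoed to stdout with the prefix stripped, and every message is appended to the per-run logfile with a timestamp and rank tag.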
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # use exp_args.muon_lr directly: the local muon_lr is only bound in the 'qkvo' branch + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach() / args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group()
+[2025-09-06 03:02:24] [Rank 0] PRINT: Constructing model...
+[2025-09-06 03:02:26] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-06 03:02:26] [Rank 0] PRINT: Model constructed and broadcasted.
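The per-mode dispatch in the script above assigns each matrix family to either Muon or Adam (embeddings, lm_head and scalars always go to Adam, and mode 9 bypasses both for plain SGD). The same mapping condensed into a table is easier to audit; this is an illustrative sketch, not code from the run, and the group names stand in for the lists built in the script (under the gated parameterization, mlp_w1 covers both c_fc and c_up):

    # mode -> (matrix families given to Muon, matrix families given to Adam)
    MODE_SPLITS = {
        0:  (["attn_qk", "attn_vo", "mlp_w1", "mlp_w2"], []),
        1:  (["attn_qk"], ["attn_vo", "mlp_w1", "mlp_w2"]),
        2:  (["attn_vo"], ["attn_qk", "mlp_w1", "mlp_w2"]),
        3:  (["attn_qk", "attn_vo"], ["mlp_w1", "mlp_w2"]),
        4:  (["mlp_w1", "mlp_w2"], ["attn_qk", "attn_vo"]),
        5:  ([], ["attn_qk", "attn_vo", "mlp_w1", "mlp_w2"]),
        6:  (["mlp_w2"], ["attn_qk", "attn_vo", "mlp_w1"]),
        7:  (["attn_vo", "mlp_w1", "mlp_w2"], ["attn_qk"]),
        8:  (["attn_vo", "mlp_w2"], ["attn_qk", "mlp_w1"]),
        10: (["attn_o", "mlp_w1", "mlp_w2"], ["attn_v", "attn_qk"]),
        13: (["attn_o", "mlp_w2"], ["attn_qk", "attn_v", "mlp_w1"]),
        14: (["attn_o"], ["attn_qk", "attn_v", "mlp_w1", "mlp_w2"]),
        15: (["attn_v"], ["attn_qk", "attn_o", "mlp_w1", "mlp_w2"]),
        16: (["attn_v", "attn_qk"], ["attn_o", "mlp_w1", "mlp_w2"]),
    }

    def split_for_mode(mode, groups):
        # groups: name -> list of parameters, as collected in the script.
        muon_names, adam_names = MODE_SPLITS[mode]  # mode 9 (pure SGD) is handled separately
        muon = [p for n in muon_names for p in groups[n]]
        adam = [p for n in adam_names for p in groups[n]]
        return muon, adam

A table like this would also remove the duplicated if/elif chains in the "qkvo" and "gated" branches, whose only real difference is how mlp_w1 is collected.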
+[2025-09-06 03:02:26] [Rank 0] PRINT: Testing model forward function:
+[2025-09-06 03:02:29] [Rank 0] PRINT: Model test - Result type:
+[2025-09-06 03:02:29] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-06 03:02:29] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-06 03:02:29] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-06 03:02:30] [Rank 0] PRINT: Model returns:
+[2025-09-06 03:02:30] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-06 03:02:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-06 03:02:30] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.05).
+[2025-09-06 03:02:30] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-06 03:02:30] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-06 03:02:34] [Rank 0] PRINT: Model compilation complete.
+[2025-09-06 03:02:34] [Rank 0] PRINT: Starting warmup...
+[2025-09-06 03:03:10] [Rank 0] PRINT: Warmup complete.
+[2025-09-06 03:03:11] [Rank 0] PRINT: Starting training...
+[2025-09-06 03:03:17] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/fixed_eval_indices.json
+[2025-09-06 03:03:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
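The divisibility warning above is benign but easy to quantify from the numbers it reports: 491520 validation tokens with a 65536-token batch per pass gives 7 full passes, leaving 32768 tokens unevaluated each eval. A minimal check, using only the values from the warning itself:

    val_tokens, val_batch = 491520, 65536        # values reported by the warning above
    full_passes, skipped = divmod(val_tokens, val_batch)
    # full_passes == 7 validation batches run per eval; skipped == 32768 tokens dropped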
+[2025-09-06 03:03:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-06 03:03:54] [Rank 0] step:21/10000 train_time:32932ms step_avg:1568.18ms
+[2025-09-06 03:03:54] [Rank 0] step:41/10000 train_time:33662ms step_avg:821.02ms
+[2025-09-06 03:03:55] [Rank 0] step:61/10000 train_time:34390ms step_avg:563.78ms
+[2025-09-06 03:03:56] [Rank 0] step:81/10000 train_time:35119ms step_avg:433.56ms
+[2025-09-06 03:03:57] [Rank 0] step:101/10000 train_time:35847ms step_avg:354.92ms
+[2025-09-06 03:03:57] [Rank 0] step:121/10000 train_time:36575ms step_avg:302.27ms
+[2025-09-06 03:03:58] [Rank 0] step:141/10000 train_time:37303ms step_avg:264.56ms
+[2025-09-06 03:03:59] [Rank 0] step:161/10000 train_time:38035ms step_avg:236.24ms
+[2025-09-06 03:04:00] [Rank 0] step:181/10000 train_time:38763ms step_avg:214.16ms
+[2025-09-06 03:04:00] [Rank 0] step:201/10000 train_time:39492ms step_avg:196.48ms
+[2025-09-06 03:04:01] [Rank 0] step:221/10000 train_time:40222ms step_avg:182.00ms
+[2025-09-06 03:04:02] [Rank 0] step:241/10000 train_time:40949ms step_avg:169.91ms
+[2025-09-06 03:04:02] [Rank 0] step:261/10000 train_time:41678ms step_avg:159.69ms
+[2025-09-06 03:04:03] [Rank 0] step:281/10000 train_time:42407ms step_avg:150.91ms
+[2025-09-06 03:04:04] [Rank 0] step:301/10000 train_time:43136ms step_avg:143.31ms
+[2025-09-06 03:04:05] [Rank 0] step:321/10000 train_time:43863ms step_avg:136.64ms
+[2025-09-06 03:04:05] [Rank 0] step:341/10000 train_time:44592ms step_avg:130.77ms
+[2025-09-06 03:04:06] [Rank 0] step:361/10000 train_time:45320ms step_avg:125.54ms
+[2025-09-06 03:04:07] [Rank 0] step:381/10000 train_time:46048ms step_avg:120.86ms
+[2025-09-06 03:04:08] [Rank 0] step:401/10000 train_time:46776ms step_avg:116.65ms
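The falling step_avg column is dominated by a one-off cost in the first logged segment: roughly 32.9 s elapse by step 21, while each later 20-step segment costs about 0.73 s. The marginal per-step time is therefore near 36.5 ms, a back-of-envelope estimate from the cumulative train_time values above (the log itself does not break the initial overhead out):

    t21, t41 = 32932, 33662        # cumulative train_time (ms) at steps 21 and 41, from above
    steady_ms = (t41 - t21) / 20   # ~36.5 ms per step once warm
    # step_avg keeps decaying toward this value as the initial overhead is amortized.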
+[2025-09-06 03:04:08] [Rank 0] step:421/10000 train_time:47504ms step_avg:112.84ms
+[2025-09-06 03:04:09] [Rank 0] step:441/10000 train_time:48233ms step_avg:109.37ms
+[2025-09-06 03:04:10] [Rank 0] step:461/10000 train_time:48961ms step_avg:106.21ms
+[2025-09-06 03:04:10] [Rank 0] step:481/10000 train_time:49690ms step_avg:103.31ms
+[2025-09-06 03:04:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:04:12] [Rank 0] PRINT: step:500/10000 train_loss:6.7956 val_loss:5.0071 train_time:50500ms step_avg:101.00ms
+[2025-09-06 03:04:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:04:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:05:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:05:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:05:33] [Rank 0] Total Loss: 6.5179
+[2025-09-06 03:05:33] [Rank 0] Total FTA (Unweighted): 0.0288
+[2025-09-06 03:05:33] [Rank 0] Total FTA (Weighted): 0.0288
+[2025-09-06 03:05:33] [Rank 0] Group 0 Loss: 4.3535
+[2025-09-06 03:05:33] [Rank 0] Group 1 Loss: 5.1901
+[2025-09-06 03:05:33] [Rank 0] Group 2 Loss: 5.8056
+[2025-09-06 03:05:33] [Rank 0] Group 3 Loss: 6.3346
+[2025-09-06 03:05:33] [Rank 0] Group 4 Loss: 6.7212
+[2025-09-06 03:05:33] [Rank 0] Group 5 Loss: 6.8021
+[2025-09-06 03:05:33] [Rank 0] Group 6 Loss: 6.8600
+[2025-09-06 03:05:33] [Rank 0] Group 7 Loss: 6.7576
+[2025-09-06 03:05:33] [Rank 0] Group 8 Loss: 6.9075
+[2025-09-06 03:05:33] [Rank 0] Group 9 Loss: 6.9895
+[2025-09-06 03:05:33] [Rank 0] Group 10 Loss: 6.9608
+[2025-09-06 03:05:33] [Rank 0] Group 11 Loss: 7.0177
+[2025-09-06 03:05:33] [Rank 0] Group 12 Loss: 6.8511
+[2025-09-06 03:05:33] [Rank 0] Group 13 Loss: 6.8700
+[2025-09-06 03:05:33] [Rank 0] Group 14 Loss: 6.9853
+[2025-09-06 03:05:33] [Rank 0] Group 15 Loss: 6.8802
+[2025-09-06 03:05:33] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 03:05:33] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:05:33] [Rank 0] Group 2 FTA: 0.0000
+[2025-09-06 03:05:33] [Rank 0] Group 3 FTA: 0.0000
+[2025-09-06 03:05:33] [Rank 0] Group 4 FTA: 0.0100
+[2025-09-06 03:05:33] [Rank 0] Group 5 FTA: 0.0000
+[2025-09-06 03:05:33] [Rank 0] Group 6 FTA: 0.0200
+[2025-09-06 03:05:33] [Rank 0] Group 7 FTA: 0.0300
+[2025-09-06 03:05:33] [Rank 0] Group 8 FTA: 0.0400
+[2025-09-06 03:05:33] [Rank 0] Group 9 FTA: 0.0200
+[2025-09-06 03:05:33] [Rank 0] Group 10 FTA: 0.0200
+[2025-09-06 03:05:33] [Rank 0] Group 11 FTA: 0.0400
+[2025-09-06 03:05:33] [Rank 0] Group 12 FTA: 0.0100
+[2025-09-06 03:05:33] [Rank 0] Group 13 FTA: 0.0200
+[2025-09-06 03:05:33] [Rank 0] Group 14 FTA: 0.0100
+[2025-09-06 03:05:33] [Rank 0] Group 15 FTA: 0.0400
+[2025-09-06 03:05:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:05:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:05:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:05:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:05:35] [Rank 0] step:501/10000 train_time:50511ms step_avg:100.82ms
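Unweighted and weighted total FTA match exactly (0.0288) by construction rather than coincidence: the fixed eval set holds the same number of samples per group (per_group_k = 100 in this run family's config, consistent with the "Fixed-eval set loaded with 1600 samples" line for 16 groups), and a sample-weighted mean over equal-sized groups reduces to the plain mean of per-group accuracies. A check against the per-group FTA values above:

    per_group_fta = [0.00, 0.20, 0.00, 0.00, 0.01, 0.00, 0.02, 0.03,
                     0.04, 0.02, 0.02, 0.04, 0.01, 0.02, 0.01, 0.04]  # groups 0-15
    unweighted = sum(per_group_fta) / len(per_group_fta)              # 0.02875 -> 0.0288 as logged
    weighted = sum(a * 100 for a in per_group_fta) / (100 * len(per_group_fta))  # identical with equal counts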
+[2025-09-06 03:05:36] [Rank 0] step:521/10000 train_time:51183ms step_avg:98.24ms
+[2025-09-06 03:05:37] [Rank 0] step:541/10000 train_time:51912ms step_avg:95.95ms
+[2025-09-06 03:05:37] [Rank 0] step:561/10000 train_time:52639ms step_avg:93.83ms
+[2025-09-06 03:05:38] [Rank 0] step:581/10000 train_time:53366ms step_avg:91.85ms
+[2025-09-06 03:05:39] [Rank 0] step:601/10000 train_time:54094ms step_avg:90.01ms
+[2025-09-06 03:05:40] [Rank 0] step:621/10000 train_time:54822ms step_avg:88.28ms
+[2025-09-06 03:05:40] [Rank 0] step:641/10000 train_time:55550ms step_avg:86.66ms
+[2025-09-06 03:05:41] [Rank 0] step:661/10000 train_time:56279ms step_avg:85.14ms
+[2025-09-06 03:05:42] [Rank 0] step:681/10000 train_time:57007ms step_avg:83.71ms
+[2025-09-06 03:05:42] [Rank 0] step:701/10000 train_time:57734ms step_avg:82.36ms
+[2025-09-06 03:05:43] [Rank 0] step:721/10000 train_time:58463ms step_avg:81.09ms
+[2025-09-06 03:05:44] [Rank 0] step:741/10000 train_time:59191ms step_avg:79.88ms
+[2025-09-06 03:05:45] [Rank 0] step:761/10000 train_time:59923ms step_avg:78.74ms
+[2025-09-06 03:05:45] [Rank 0] step:781/10000 train_time:60656ms step_avg:77.66ms
+[2025-09-06 03:05:46] [Rank 0] step:801/10000 train_time:61388ms step_avg:76.64ms
+[2025-09-06 03:05:47] [Rank 0] step:821/10000 train_time:62740ms step_avg:76.42ms
+[2025-09-06 03:05:48] [Rank 0] step:841/10000 train_time:63473ms step_avg:75.47ms
+[2025-09-06 03:05:49] [Rank 0] step:861/10000 train_time:64206ms step_avg:74.57ms
+[2025-09-06 03:05:50] [Rank 0] step:881/10000 train_time:64937ms step_avg:73.71ms
+[2025-09-06 03:05:50] [Rank 0] step:901/10000 train_time:65669ms step_avg:72.89ms
+[2025-09-06 03:05:51] [Rank 0] step:921/10000 train_time:66403ms step_avg:72.10ms
+[2025-09-06 03:05:52] [Rank 0] step:941/10000 train_time:67136ms step_avg:71.34ms
+[2025-09-06 03:05:53] [Rank 0] step:961/10000 train_time:67867ms step_avg:70.62ms
+[2025-09-06 03:05:53] [Rank 0] step:981/10000 train_time:68599ms step_avg:69.93ms
+[2025-09-06 03:05:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:05:55] [Rank 0] PRINT: step:1000/10000 train_loss:4.5160 val_loss:4.1382 train_time:69412ms step_avg:69.41ms
+[2025-09-06 03:05:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:05:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:07:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:07:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:07:16] [Rank 0] Total Loss: 5.9152
+[2025-09-06 03:07:16] [Rank 0] Total FTA (Unweighted): 0.0838
+[2025-09-06 03:07:16] [Rank 0] Total FTA (Weighted): 0.0838
+[2025-09-06 03:07:16] [Rank 0] Group 0 Loss: 3.5975
+[2025-09-06 03:07:16] [Rank 0] Group 1 Loss: 3.7308
+[2025-09-06 03:07:16] [Rank 0] Group 2 Loss: 4.6234
+[2025-09-06 03:07:16] [Rank 0] Group 3 Loss: 5.3757
+[2025-09-06 03:07:16] [Rank 0] Group 4 Loss: 6.1292
+[2025-09-06 03:07:16] [Rank 0] Group 5 Loss: 6.2703
+[2025-09-06 03:07:16] [Rank 0] Group 6 Loss: 6.3615
+[2025-09-06 03:07:16] [Rank 0] Group 7 Loss: 6.3259
+[2025-09-06 03:07:16] [Rank 0] Group 8 Loss: 6.4741
+[2025-09-06 03:07:16] [Rank 0] Group 9 Loss: 6.6065
+[2025-09-06 03:07:16] [Rank 0] Group 10 Loss: 6.5606
+[2025-09-06 03:07:16] [Rank 0] Group 11 Loss: 6.6247
+[2025-09-06 03:07:16] [Rank 0] Group 12 Loss: 6.4622
+[2025-09-06 03:07:16] [Rank 0] Group 13 Loss: 6.4548
+[2025-09-06 03:07:16] [Rank 0] Group 14 Loss: 6.5712
+[2025-09-06 03:07:16] [Rank 0] Group 15 Loss: 6.4751
+[2025-09-06 03:07:16] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 03:07:16] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:07:16] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-06 03:07:16] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-06 03:07:16] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-06 03:07:16] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-06 03:07:16] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 03:07:16] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-06 03:07:16] [Rank 0] Group 8 FTA: 0.1200
+[2025-09-06 03:07:16] [Rank 0] Group 9 FTA: 0.0800
+[2025-09-06 03:07:16] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-06 03:07:16] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-06 03:07:16] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 03:07:16] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-06 03:07:16] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 03:07:16] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 03:07:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:07:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:07:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:07:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:07:18] [Rank 0] step:1001/10000 train_time:69423ms step_avg:69.35ms
+[2025-09-06 03:07:19] [Rank 0] step:1021/10000 train_time:70096ms step_avg:68.65ms
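A note on the learning rate during this stretch: get_lr in the script holds the multiplier at 1.0 until a 1 - cooldown_frac fraction of training has elapsed, then decays it linearly to 0.1. Assuming cooldown_frac = 0.8 and num_iterations = 10000 (the values used elsewhere in this run family's config.json; this log does not restate them), the flat phase lasts until step 2000, so every step shown so far still runs at the full sgd_lr of 0.05. A sketch mirroring get_lr under those assumptions:

    def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
        # Mirrors get_lr above; the default config values are assumptions, see note.
        x = min(max(step / num_iterations, 0.0), 1.0)
        if x < 1 - cooldown_frac:
            return 1.0                        # flat phase: lr_multiplier(1000) == 1.0
        w = (1 - x) / cooldown_frac
        return w * 1.0 + (1 - w) * 0.1        # linear decay: lr_multiplier(10000) == 0.1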
+[2025-09-06 03:07:19] [Rank 0] step:1041/10000 train_time:70830ms step_avg:68.04ms
+[2025-09-06 03:07:20] [Rank 0] step:1061/10000 train_time:71564ms step_avg:67.45ms
+[2025-09-06 03:07:21] [Rank 0] step:1081/10000 train_time:72297ms step_avg:66.88ms
+[2025-09-06 03:07:22] [Rank 0] step:1101/10000 train_time:73030ms step_avg:66.33ms
+[2025-09-06 03:07:22] [Rank 0] step:1121/10000 train_time:73764ms step_avg:65.80ms
+[2025-09-06 03:07:23] [Rank 0] step:1141/10000 train_time:74497ms step_avg:65.29ms
+[2025-09-06 03:07:24] [Rank 0] step:1161/10000 train_time:75231ms step_avg:64.80ms
+[2025-09-06 03:07:25] [Rank 0] step:1181/10000 train_time:75964ms step_avg:64.32ms
+[2025-09-06 03:07:25] [Rank 0] step:1201/10000 train_time:76697ms step_avg:63.86ms
+[2025-09-06 03:07:26] [Rank 0] step:1221/10000 train_time:77431ms step_avg:63.42ms
+[2025-09-06 03:07:27] [Rank 0] step:1241/10000 train_time:78164ms step_avg:62.98ms
+[2025-09-06 03:07:27] [Rank 0] step:1261/10000 train_time:78897ms step_avg:62.57ms
+[2025-09-06 03:07:28] [Rank 0] step:1281/10000 train_time:79630ms step_avg:62.16ms
+[2025-09-06 03:07:29] [Rank 0] step:1301/10000 train_time:80364ms step_avg:61.77ms
+[2025-09-06 03:07:30] [Rank 0] step:1321/10000 train_time:81097ms step_avg:61.39ms
+[2025-09-06 03:07:30] [Rank 0] step:1341/10000 train_time:81829ms step_avg:61.02ms
+[2025-09-06 03:07:31] [Rank 0] step:1361/10000 train_time:82561ms step_avg:60.66ms
+[2025-09-06 03:07:32] [Rank 0] step:1381/10000 train_time:83294ms step_avg:60.31ms
+[2025-09-06 03:07:33] [Rank 0] step:1401/10000 train_time:84156ms step_avg:60.07ms
+[2025-09-06 03:07:33] [Rank 0] step:1421/10000 train_time:84890ms step_avg:59.74ms
+[2025-09-06 03:07:34] [Rank 0] step:1441/10000 train_time:85622ms step_avg:59.42ms
+[2025-09-06 03:07:35] [Rank 0] step:1461/10000 train_time:86355ms step_avg:59.11ms
+[2025-09-06 03:07:36] [Rank 0] step:1481/10000 train_time:87290ms step_avg:58.94ms
+[2025-09-06 03:07:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:07:37] [Rank 0] PRINT: step:1500/10000 train_loss:3.8960 val_loss:3.6817 train_time:88103ms step_avg:58.74ms
+[2025-09-06 03:07:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:07:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:08:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:08:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:08:58] [Rank 0] Total Loss: 5.6109
+[2025-09-06 03:08:58] [Rank 0] Total FTA (Unweighted): 0.0944
+[2025-09-06 03:08:58] [Rank 0] Total FTA (Weighted): 0.0944
+[2025-09-06 03:08:58] [Rank 0] Group 0 Loss: 3.3705
+[2025-09-06 03:08:58] [Rank 0] Group 1 Loss: 3.4210
+[2025-09-06 03:08:58] [Rank 0] Group 2 Loss: 3.9516
+[2025-09-06 03:08:58] [Rank 0] Group 3 Loss: 4.8060
+[2025-09-06 03:08:58] [Rank 0] Group 4 Loss: 5.6466
+[2025-09-06 03:08:58] [Rank 0] Group 5 Loss: 5.9130
+[2025-09-06 03:08:58] [Rank 0] Group 6 Loss: 6.0900
+[2025-09-06 03:08:58] [Rank 0] Group 7 Loss: 6.0472
+[2025-09-06 03:08:58] [Rank 0] Group 8 Loss: 6.2403
+[2025-09-06 03:08:58] [Rank 0] Group 9 Loss: 6.4022
+[2025-09-06 03:08:58] [Rank 0] Group 10 Loss: 6.3348
+[2025-09-06 03:08:58] [Rank 0] Group 11 Loss: 6.4142
+[2025-09-06 03:08:58] [Rank 0] Group 12 Loss: 6.2527
+[2025-09-06 03:08:58] [Rank 0] Group 13 Loss: 6.2578
+[2025-09-06 03:08:58] [Rank 0] Group 14 Loss: 6.3614
+[2025-09-06 03:08:58] [Rank 0] Group 15 Loss: 6.2646
+[2025-09-06 03:08:58] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 03:08:58] [Rank 0] Group 0 FTA: 0.0000 +[2025-09-06 03:08:58] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:08:58] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:08:58] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:08:58] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:08:58] [Rank 0] Group 3 FTA: 0.0800 +[2025-09-06 03:08:58] [Rank 0] Group 3 FTA: 0.0800 +[2025-09-06 03:08:58] [Rank 0] Group 4 FTA: 0.0500 +[2025-09-06 03:08:58] [Rank 0] Group 4 FTA: 0.0500 +[2025-09-06 03:08:58] [Rank 0] Group 5 FTA: 0.0900 +[2025-09-06 03:08:58] [Rank 0] Group 5 FTA: 0.0900 +[2025-09-06 03:08:58] [Rank 0] Group 6 FTA: 0.0600 +[2025-09-06 03:08:58] [Rank 0] Group 6 FTA: 0.0600 +[2025-09-06 03:08:58] [Rank 0] Group 7 FTA: 0.0800 +[2025-09-06 03:08:58] [Rank 0] Group 7 FTA: 0.0800 +[2025-09-06 03:08:58] [Rank 0] Group 8 FTA: 0.1200 +[2025-09-06 03:08:58] [Rank 0] Group 8 FTA: 0.1200 +[2025-09-06 03:08:58] [Rank 0] Group 9 FTA: 0.0800 +[2025-09-06 03:08:58] [Rank 0] Group 9 FTA: 0.0800 +[2025-09-06 03:08:58] [Rank 0] Group 10 FTA: 0.0700 +[2025-09-06 03:08:58] [Rank 0] Group 10 FTA: 0.0700 +[2025-09-06 03:08:58] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-06 03:08:58] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-06 03:08:58] [Rank 0] Group 12 FTA: 0.0700 +[2025-09-06 03:08:58] [Rank 0] Group 12 FTA: 0.0700 +[2025-09-06 03:08:58] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-06 03:08:58] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-06 03:08:58] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:08:58] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:08:58] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 03:08:58] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 03:08:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:08:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:08:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:08:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:08:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:08:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:09:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:09:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:09:00] [Rank 0] step:1501/10000 train_time:88114ms step_avg:58.70ms +[2025-09-06 03:09:00] [Rank 0] step:1501/10000 train_time:88114ms step_avg:58.70ms +[2025-09-06 03:09:01] [Rank 0] step:1521/10000 train_time:88790ms step_avg:58.38ms +[2025-09-06 03:09:01] [Rank 0] step:1521/10000 train_time:88790ms step_avg:58.38ms +[2025-09-06 03:09:01] [Rank 0] step:1541/10000 train_time:89523ms step_avg:58.09ms +[2025-09-06 03:09:01] [Rank 0] step:1541/10000 train_time:89523ms step_avg:58.09ms +[2025-09-06 03:09:02] [Rank 0] step:1561/10000 
train_time:90257ms step_avg:57.82ms +[2025-09-06 03:09:02] [Rank 0] step:1561/10000 train_time:90257ms step_avg:57.82ms +[2025-09-06 03:09:03] [Rank 0] step:1581/10000 train_time:90990ms step_avg:57.55ms +[2025-09-06 03:09:03] [Rank 0] step:1581/10000 train_time:90990ms step_avg:57.55ms +[2025-09-06 03:09:03] [Rank 0] step:1601/10000 train_time:91723ms step_avg:57.29ms +[2025-09-06 03:09:03] [Rank 0] step:1601/10000 train_time:91723ms step_avg:57.29ms +[2025-09-06 03:09:04] [Rank 0] step:1621/10000 train_time:92456ms step_avg:57.04ms +[2025-09-06 03:09:04] [Rank 0] step:1621/10000 train_time:92456ms step_avg:57.04ms +[2025-09-06 03:09:06] [Rank 0] step:1641/10000 train_time:93799ms step_avg:57.16ms +[2025-09-06 03:09:06] [Rank 0] step:1641/10000 train_time:93799ms step_avg:57.16ms +[2025-09-06 03:09:06] [Rank 0] step:1661/10000 train_time:94532ms step_avg:56.91ms +[2025-09-06 03:09:06] [Rank 0] step:1661/10000 train_time:94532ms step_avg:56.91ms +[2025-09-06 03:09:07] [Rank 0] step:1681/10000 train_time:95266ms step_avg:56.67ms +[2025-09-06 03:09:07] [Rank 0] step:1681/10000 train_time:95266ms step_avg:56.67ms +[2025-09-06 03:09:08] [Rank 0] step:1701/10000 train_time:95999ms step_avg:56.44ms +[2025-09-06 03:09:08] [Rank 0] step:1701/10000 train_time:95999ms step_avg:56.44ms +[2025-09-06 03:09:08] [Rank 0] step:1721/10000 train_time:96732ms step_avg:56.21ms +[2025-09-06 03:09:08] [Rank 0] step:1721/10000 train_time:96732ms step_avg:56.21ms +[2025-09-06 03:09:09] [Rank 0] step:1741/10000 train_time:97463ms step_avg:55.98ms +[2025-09-06 03:09:09] [Rank 0] step:1741/10000 train_time:97463ms step_avg:55.98ms +[2025-09-06 03:09:10] [Rank 0] step:1761/10000 train_time:98196ms step_avg:55.76ms +[2025-09-06 03:09:10] [Rank 0] step:1761/10000 train_time:98196ms step_avg:55.76ms +[2025-09-06 03:09:11] [Rank 0] step:1781/10000 train_time:98928ms step_avg:55.55ms +[2025-09-06 03:09:11] [Rank 0] step:1781/10000 train_time:98928ms step_avg:55.55ms +[2025-09-06 03:09:11] [Rank 0] step:1801/10000 train_time:99660ms step_avg:55.34ms +[2025-09-06 03:09:11] [Rank 0] step:1801/10000 train_time:99660ms step_avg:55.34ms +[2025-09-06 03:09:12] [Rank 0] step:1821/10000 train_time:100393ms step_avg:55.13ms +[2025-09-06 03:09:12] [Rank 0] step:1821/10000 train_time:100393ms step_avg:55.13ms +[2025-09-06 03:09:13] [Rank 0] step:1841/10000 train_time:101124ms step_avg:54.93ms +[2025-09-06 03:09:13] [Rank 0] step:1841/10000 train_time:101124ms step_avg:54.93ms +[2025-09-06 03:09:14] [Rank 0] step:1861/10000 train_time:101855ms step_avg:54.73ms +[2025-09-06 03:09:14] [Rank 0] step:1861/10000 train_time:101855ms step_avg:54.73ms +[2025-09-06 03:09:14] [Rank 0] step:1881/10000 train_time:102588ms step_avg:54.54ms +[2025-09-06 03:09:14] [Rank 0] step:1881/10000 train_time:102588ms step_avg:54.54ms +[2025-09-06 03:09:15] [Rank 0] step:1901/10000 train_time:103321ms step_avg:54.35ms +[2025-09-06 03:09:15] [Rank 0] step:1901/10000 train_time:103321ms step_avg:54.35ms +[2025-09-06 03:09:16] [Rank 0] step:1921/10000 train_time:104054ms step_avg:54.17ms +[2025-09-06 03:09:16] [Rank 0] step:1921/10000 train_time:104054ms step_avg:54.17ms +[2025-09-06 03:09:17] [Rank 0] step:1941/10000 train_time:104787ms step_avg:53.99ms +[2025-09-06 03:09:17] [Rank 0] step:1941/10000 train_time:104787ms step_avg:53.99ms +[2025-09-06 03:09:17] [Rank 0] step:1961/10000 train_time:105520ms step_avg:53.81ms +[2025-09-06 03:09:17] [Rank 0] step:1961/10000 train_time:105520ms step_avg:53.81ms +[2025-09-06 03:09:18] [Rank 0] step:1981/10000 
train_time:106253ms step_avg:53.64ms +[2025-09-06 03:09:18] [Rank 0] step:1981/10000 train_time:106253ms step_avg:53.64ms +[2025-09-06 03:09:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:09:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:09:19] [Rank 0] PRINT: step:2000/10000 train_loss:3.5387 val_loss:3.4044 train_time:107066ms step_avg:53.53ms +[2025-09-06 03:09:19] [Rank 0] PRINT: step:2000/10000 train_loss:3.5387 val_loss:3.4044 train_time:107066ms step_avg:53.53ms +[2025-09-06 03:09:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:09:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:09:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:09:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:10:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:10:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:10:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:10:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:10:41] [Rank 0] Total Loss: 5.3804 +[2025-09-06 03:10:41] [Rank 0] Total Loss: 5.3804 +[2025-09-06 03:10:41] [Rank 0] Total FTA (Unweighted): 0.1269 +[2025-09-06 03:10:41] [Rank 0] Total FTA (Unweighted): 0.1269 +[2025-09-06 03:10:41] [Rank 0] Total FTA (Weighted): 0.1269 +[2025-09-06 03:10:41] [Rank 0] Total FTA (Weighted): 0.1269 +[2025-09-06 03:10:41] [Rank 0] Group 0 Loss: 3.2754 +[2025-09-06 03:10:41] [Rank 0] Group 0 Loss: 3.2754 +[2025-09-06 03:10:41] [Rank 0] Group 1 Loss: 3.2773 +[2025-09-06 03:10:41] [Rank 0] Group 1 Loss: 3.2773 +[2025-09-06 03:10:41] [Rank 0] Group 2 Loss: 3.6296 +[2025-09-06 03:10:41] [Rank 0] Group 2 Loss: 3.6296 +[2025-09-06 03:10:41] [Rank 0] Group 3 Loss: 4.4133 +[2025-09-06 03:10:41] [Rank 0] Group 3 Loss: 4.4133 +[2025-09-06 03:10:41] [Rank 0] Group 4 Loss: 5.2889 +[2025-09-06 03:10:41] [Rank 0] Group 4 Loss: 5.2889 +[2025-09-06 03:10:41] [Rank 0] Group 5 Loss: 5.6326 +[2025-09-06 03:10:41] [Rank 0] Group 5 Loss: 5.6326 +[2025-09-06 03:10:41] [Rank 0] Group 6 Loss: 5.8208 +[2025-09-06 03:10:41] [Rank 0] Group 6 Loss: 5.8208 +[2025-09-06 03:10:41] [Rank 0] Group 7 Loss: 5.8254 +[2025-09-06 03:10:41] [Rank 0] Group 7 Loss: 5.8254 +[2025-09-06 03:10:41] [Rank 0] Group 8 Loss: 6.0499 +[2025-09-06 03:10:41] [Rank 0] Group 8 Loss: 6.0499 +[2025-09-06 03:10:41] [Rank 0] Group 9 Loss: 6.1874 +[2025-09-06 03:10:41] [Rank 0] Group 9 Loss: 6.1874 +[2025-09-06 03:10:41] [Rank 0] Group 10 Loss: 6.1460 +[2025-09-06 03:10:41] [Rank 0] Group 10 Loss: 6.1460 +[2025-09-06 03:10:41] [Rank 0] Group 11 Loss: 6.2221 +[2025-09-06 03:10:41] [Rank 0] Group 11 Loss: 6.2221 +[2025-09-06 03:10:41] [Rank 0] Group 12 Loss: 6.0539 +[2025-09-06 03:10:41] [Rank 0] Group 12 Loss: 6.0539 +[2025-09-06 03:10:41] [Rank 0] Group 13 Loss: 6.0516 +[2025-09-06 03:10:41] [Rank 0] Group 13 Loss: 6.0516 +[2025-09-06 03:10:41] [Rank 0] Group 14 Loss: 6.1435 +[2025-09-06 03:10:41] [Rank 0] Group 14 Loss: 6.1435 +[2025-09-06 03:10:41] [Rank 0] Group 15 Loss: 6.0690 +[2025-09-06 03:10:41] [Rank 0] Group 15 Loss: 6.0690 +[2025-09-06 03:10:41] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 03:10:41] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 03:10:41] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:10:41] [Rank 0] Group 1 FTA: 
0.2000 +[2025-09-06 03:10:41] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:10:41] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:10:41] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:10:41] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:10:41] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 03:10:41] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 03:10:41] [Rank 0] Group 5 FTA: 0.1600 +[2025-09-06 03:10:41] [Rank 0] Group 5 FTA: 0.1600 +[2025-09-06 03:10:41] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 03:10:41] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 03:10:41] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:10:41] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:10:41] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-06 03:10:41] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-06 03:10:41] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-06 03:10:41] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-06 03:10:41] [Rank 0] Group 10 FTA: 0.1000 +[2025-09-06 03:10:41] [Rank 0] Group 10 FTA: 0.1000 +[2025-09-06 03:10:41] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-06 03:10:41] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-06 03:10:41] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 03:10:41] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 03:10:41] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 03:10:41] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 03:10:41] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:10:41] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:10:41] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-06 03:10:41] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-06 03:10:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:10:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:10:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:10:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:10:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:10:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:10:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:10:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:10:43] [Rank 0] step:2001/10000 train_time:107077ms step_avg:53.51ms +[2025-09-06 03:10:43] [Rank 0] step:2001/10000 train_time:107077ms step_avg:53.51ms +[2025-09-06 03:10:44] [Rank 0] step:2021/10000 train_time:107947ms step_avg:53.41ms +[2025-09-06 03:10:44] [Rank 0] step:2021/10000 train_time:107947ms step_avg:53.41ms +[2025-09-06 03:10:45] [Rank 0] step:2041/10000 train_time:108680ms step_avg:53.25ms +[2025-09-06 03:10:45] [Rank 0] step:2041/10000 train_time:108680ms step_avg:53.25ms +[2025-09-06 03:10:45] [Rank 0] step:2061/10000 train_time:109535ms step_avg:53.15ms +[2025-09-06 03:10:45] [Rank 0] step:2061/10000 train_time:109535ms step_avg:53.15ms +[2025-09-06 03:10:46] 
[Rank 0] step:2081/10000 train_time:110267ms step_avg:52.99ms +[2025-09-06 03:10:46] [Rank 0] step:2081/10000 train_time:110267ms step_avg:52.99ms +[2025-09-06 03:10:47] [Rank 0] step:2101/10000 train_time:111000ms step_avg:52.83ms +[2025-09-06 03:10:47] [Rank 0] step:2101/10000 train_time:111000ms step_avg:52.83ms +[2025-09-06 03:10:48] [Rank 0] step:2121/10000 train_time:111732ms step_avg:52.68ms +[2025-09-06 03:10:48] [Rank 0] step:2121/10000 train_time:111732ms step_avg:52.68ms +[2025-09-06 03:10:48] [Rank 0] step:2141/10000 train_time:112465ms step_avg:52.53ms +[2025-09-06 03:10:48] [Rank 0] step:2141/10000 train_time:112465ms step_avg:52.53ms +[2025-09-06 03:10:49] [Rank 0] step:2161/10000 train_time:113197ms step_avg:52.38ms +[2025-09-06 03:10:49] [Rank 0] step:2161/10000 train_time:113197ms step_avg:52.38ms +[2025-09-06 03:10:50] [Rank 0] step:2181/10000 train_time:113930ms step_avg:52.24ms +[2025-09-06 03:10:50] [Rank 0] step:2181/10000 train_time:113930ms step_avg:52.24ms +[2025-09-06 03:10:50] [Rank 0] step:2201/10000 train_time:114662ms step_avg:52.10ms +[2025-09-06 03:10:50] [Rank 0] step:2201/10000 train_time:114662ms step_avg:52.10ms +[2025-09-06 03:10:51] [Rank 0] step:2221/10000 train_time:115395ms step_avg:51.96ms +[2025-09-06 03:10:51] [Rank 0] step:2221/10000 train_time:115395ms step_avg:51.96ms +[2025-09-06 03:10:52] [Rank 0] step:2241/10000 train_time:116132ms step_avg:51.82ms +[2025-09-06 03:10:52] [Rank 0] step:2241/10000 train_time:116132ms step_avg:51.82ms +[2025-09-06 03:10:53] [Rank 0] step:2261/10000 train_time:116871ms step_avg:51.69ms +[2025-09-06 03:10:53] [Rank 0] step:2261/10000 train_time:116871ms step_avg:51.69ms +[2025-09-06 03:10:53] [Rank 0] step:2281/10000 train_time:117609ms step_avg:51.56ms +[2025-09-06 03:10:53] [Rank 0] step:2281/10000 train_time:117609ms step_avg:51.56ms +[2025-09-06 03:10:54] [Rank 0] step:2301/10000 train_time:118348ms step_avg:51.43ms +[2025-09-06 03:10:54] [Rank 0] step:2301/10000 train_time:118348ms step_avg:51.43ms +[2025-09-06 03:10:55] [Rank 0] step:2321/10000 train_time:119087ms step_avg:51.31ms +[2025-09-06 03:10:55] [Rank 0] step:2321/10000 train_time:119087ms step_avg:51.31ms +[2025-09-06 03:10:56] [Rank 0] step:2341/10000 train_time:119826ms step_avg:51.19ms +[2025-09-06 03:10:56] [Rank 0] step:2341/10000 train_time:119826ms step_avg:51.19ms +[2025-09-06 03:10:56] [Rank 0] step:2361/10000 train_time:120566ms step_avg:51.07ms +[2025-09-06 03:10:56] [Rank 0] step:2361/10000 train_time:120566ms step_avg:51.07ms +[2025-09-06 03:10:57] [Rank 0] step:2381/10000 train_time:121306ms step_avg:50.95ms +[2025-09-06 03:10:57] [Rank 0] step:2381/10000 train_time:121306ms step_avg:50.95ms +[2025-09-06 03:10:58] [Rank 0] step:2401/10000 train_time:122045ms step_avg:50.83ms +[2025-09-06 03:10:58] [Rank 0] step:2401/10000 train_time:122045ms step_avg:50.83ms +[2025-09-06 03:10:59] [Rank 0] step:2421/10000 train_time:122783ms step_avg:50.72ms +[2025-09-06 03:10:59] [Rank 0] step:2421/10000 train_time:122783ms step_avg:50.72ms +[2025-09-06 03:10:59] [Rank 0] step:2441/10000 train_time:123523ms step_avg:50.60ms +[2025-09-06 03:10:59] [Rank 0] step:2441/10000 train_time:123523ms step_avg:50.60ms +[2025-09-06 03:11:00] [Rank 0] step:2461/10000 train_time:124262ms step_avg:50.49ms +[2025-09-06 03:11:00] [Rank 0] step:2461/10000 train_time:124262ms step_avg:50.49ms +[2025-09-06 03:11:01] [Rank 0] step:2481/10000 train_time:125002ms step_avg:50.38ms +[2025-09-06 03:11:01] [Rank 0] step:2481/10000 train_time:125002ms step_avg:50.38ms 
+[2025-09-06 03:11:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:11:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:11:02] [Rank 0] PRINT: step:2500/10000 train_loss:3.3142 val_loss:3.2128 train_time:125821ms step_avg:50.33ms +[2025-09-06 03:11:02] [Rank 0] PRINT: step:2500/10000 train_loss:3.3142 val_loss:3.2128 train_time:125821ms step_avg:50.33ms +[2025-09-06 03:11:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:11:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:11:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:11:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:12:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:12:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:12:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:12:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:12:24] [Rank 0] Total Loss: 5.2607 +[2025-09-06 03:12:24] [Rank 0] Total Loss: 5.2607 +[2025-09-06 03:12:24] [Rank 0] Total FTA (Unweighted): 0.1281 +[2025-09-06 03:12:24] [Rank 0] Total FTA (Unweighted): 0.1281 +[2025-09-06 03:12:24] [Rank 0] Total FTA (Weighted): 0.1281 +[2025-09-06 03:12:24] [Rank 0] Total FTA (Weighted): 0.1281 +[2025-09-06 03:12:24] [Rank 0] Group 0 Loss: 3.2571 +[2025-09-06 03:12:24] [Rank 0] Group 0 Loss: 3.2571 +[2025-09-06 03:12:24] [Rank 0] Group 1 Loss: 3.1860 +[2025-09-06 03:12:24] [Rank 0] Group 1 Loss: 3.1860 +[2025-09-06 03:12:24] [Rank 0] Group 2 Loss: 3.5300 +[2025-09-06 03:12:24] [Rank 0] Group 2 Loss: 3.5300 +[2025-09-06 03:12:24] [Rank 0] Group 3 Loss: 4.2050 +[2025-09-06 03:12:24] [Rank 0] Group 3 Loss: 4.2050 +[2025-09-06 03:12:24] [Rank 0] Group 4 Loss: 5.0565 +[2025-09-06 03:12:24] [Rank 0] Group 4 Loss: 5.0565 +[2025-09-06 03:12:24] [Rank 0] Group 5 Loss: 5.4295 +[2025-09-06 03:12:24] [Rank 0] Group 5 Loss: 5.4295 +[2025-09-06 03:12:24] [Rank 0] Group 6 Loss: 5.6926 +[2025-09-06 03:12:24] [Rank 0] Group 6 Loss: 5.6926 +[2025-09-06 03:12:24] [Rank 0] Group 7 Loss: 5.6919 +[2025-09-06 03:12:24] [Rank 0] Group 7 Loss: 5.6919 +[2025-09-06 03:12:24] [Rank 0] Group 8 Loss: 5.9425 +[2025-09-06 03:12:24] [Rank 0] Group 8 Loss: 5.9425 +[2025-09-06 03:12:24] [Rank 0] Group 9 Loss: 6.0966 +[2025-09-06 03:12:24] [Rank 0] Group 9 Loss: 6.0966 +[2025-09-06 03:12:24] [Rank 0] Group 10 Loss: 6.0474 +[2025-09-06 03:12:24] [Rank 0] Group 10 Loss: 6.0474 +[2025-09-06 03:12:24] [Rank 0] Group 11 Loss: 6.1138 +[2025-09-06 03:12:24] [Rank 0] Group 11 Loss: 6.1138 +[2025-09-06 03:12:24] [Rank 0] Group 12 Loss: 5.9629 +[2025-09-06 03:12:24] [Rank 0] Group 12 Loss: 5.9629 +[2025-09-06 03:12:24] [Rank 0] Group 13 Loss: 5.9468 +[2025-09-06 03:12:24] [Rank 0] Group 13 Loss: 5.9468 +[2025-09-06 03:12:24] [Rank 0] Group 14 Loss: 6.0402 +[2025-09-06 03:12:24] [Rank 0] Group 14 Loss: 6.0402 +[2025-09-06 03:12:24] [Rank 0] Group 15 Loss: 5.9732 +[2025-09-06 03:12:24] [Rank 0] Group 15 Loss: 5.9732 +[2025-09-06 03:12:24] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 03:12:24] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 03:12:24] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:12:24] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:12:24] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:12:24] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 
03:12:24] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:12:24] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:12:24] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 03:12:24] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 03:12:24] [Rank 0] Group 5 FTA: 0.1600 +[2025-09-06 03:12:24] [Rank 0] Group 5 FTA: 0.1600 +[2025-09-06 03:12:24] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 03:12:24] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 03:12:24] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:12:24] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:12:24] [Rank 0] Group 8 FTA: 0.1300 +[2025-09-06 03:12:24] [Rank 0] Group 8 FTA: 0.1300 +[2025-09-06 03:12:24] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 03:12:24] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 03:12:24] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 03:12:24] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 03:12:24] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 03:12:24] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 03:12:24] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 03:12:24] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 03:12:24] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 03:12:24] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 03:12:24] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:12:24] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:12:24] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 03:12:24] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 03:12:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:12:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:12:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:12:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:12:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:12:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:12:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:12:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:12:26] [Rank 0] step:2501/10000 train_time:125832ms step_avg:50.31ms +[2025-09-06 03:12:26] [Rank 0] step:2501/10000 train_time:125832ms step_avg:50.31ms +[2025-09-06 03:12:26] [Rank 0] step:2521/10000 train_time:126506ms step_avg:50.18ms +[2025-09-06 03:12:26] [Rank 0] step:2521/10000 train_time:126506ms step_avg:50.18ms +[2025-09-06 03:12:27] [Rank 0] step:2541/10000 train_time:127246ms step_avg:50.08ms +[2025-09-06 03:12:27] [Rank 0] step:2541/10000 train_time:127246ms step_avg:50.08ms +[2025-09-06 03:12:28] [Rank 0] step:2561/10000 train_time:127985ms step_avg:49.97ms +[2025-09-06 03:12:28] [Rank 0] step:2561/10000 train_time:127985ms step_avg:49.97ms +[2025-09-06 03:12:28] [Rank 0] step:2581/10000 train_time:128724ms step_avg:49.87ms +[2025-09-06 03:12:28] [Rank 0] step:2581/10000 
train_time:128724ms step_avg:49.87ms +[2025-09-06 03:12:29] [Rank 0] step:2601/10000 train_time:129463ms step_avg:49.77ms +[2025-09-06 03:12:29] [Rank 0] step:2601/10000 train_time:129463ms step_avg:49.77ms +[2025-09-06 03:12:30] [Rank 0] step:2621/10000 train_time:130205ms step_avg:49.68ms +[2025-09-06 03:12:30] [Rank 0] step:2621/10000 train_time:130205ms step_avg:49.68ms +[2025-09-06 03:12:31] [Rank 0] step:2641/10000 train_time:130945ms step_avg:49.58ms +[2025-09-06 03:12:31] [Rank 0] step:2641/10000 train_time:130945ms step_avg:49.58ms +[2025-09-06 03:12:31] [Rank 0] step:2661/10000 train_time:131684ms step_avg:49.49ms +[2025-09-06 03:12:31] [Rank 0] step:2661/10000 train_time:131684ms step_avg:49.49ms +[2025-09-06 03:12:32] [Rank 0] step:2681/10000 train_time:132423ms step_avg:49.39ms +[2025-09-06 03:12:32] [Rank 0] step:2681/10000 train_time:132423ms step_avg:49.39ms +[2025-09-06 03:12:33] [Rank 0] step:2701/10000 train_time:133162ms step_avg:49.30ms +[2025-09-06 03:12:33] [Rank 0] step:2701/10000 train_time:133162ms step_avg:49.30ms +[2025-09-06 03:12:34] [Rank 0] step:2721/10000 train_time:133902ms step_avg:49.21ms +[2025-09-06 03:12:34] [Rank 0] step:2721/10000 train_time:133902ms step_avg:49.21ms +[2025-09-06 03:12:34] [Rank 0] step:2741/10000 train_time:134641ms step_avg:49.12ms +[2025-09-06 03:12:34] [Rank 0] step:2741/10000 train_time:134641ms step_avg:49.12ms +[2025-09-06 03:12:35] [Rank 0] step:2761/10000 train_time:135381ms step_avg:49.03ms +[2025-09-06 03:12:35] [Rank 0] step:2761/10000 train_time:135381ms step_avg:49.03ms +[2025-09-06 03:12:36] [Rank 0] step:2781/10000 train_time:136121ms step_avg:48.95ms +[2025-09-06 03:12:36] [Rank 0] step:2781/10000 train_time:136121ms step_avg:48.95ms +[2025-09-06 03:12:37] [Rank 0] step:2801/10000 train_time:136861ms step_avg:48.86ms +[2025-09-06 03:12:37] [Rank 0] step:2801/10000 train_time:136861ms step_avg:48.86ms +[2025-09-06 03:12:38] [Rank 0] step:2821/10000 train_time:138228ms step_avg:49.00ms +[2025-09-06 03:12:38] [Rank 0] step:2821/10000 train_time:138228ms step_avg:49.00ms +[2025-09-06 03:12:39] [Rank 0] step:2841/10000 train_time:138968ms step_avg:48.92ms +[2025-09-06 03:12:39] [Rank 0] step:2841/10000 train_time:138968ms step_avg:48.92ms +[2025-09-06 03:12:39] [Rank 0] step:2861/10000 train_time:139707ms step_avg:48.83ms +[2025-09-06 03:12:39] [Rank 0] step:2861/10000 train_time:139707ms step_avg:48.83ms +[2025-09-06 03:12:40] [Rank 0] step:2881/10000 train_time:140444ms step_avg:48.75ms +[2025-09-06 03:12:40] [Rank 0] step:2881/10000 train_time:140444ms step_avg:48.75ms +[2025-09-06 03:12:41] [Rank 0] step:2901/10000 train_time:141182ms step_avg:48.67ms +[2025-09-06 03:12:41] [Rank 0] step:2901/10000 train_time:141182ms step_avg:48.67ms +[2025-09-06 03:12:42] [Rank 0] step:2921/10000 train_time:141920ms step_avg:48.59ms +[2025-09-06 03:12:42] [Rank 0] step:2921/10000 train_time:141920ms step_avg:48.59ms +[2025-09-06 03:12:42] [Rank 0] step:2941/10000 train_time:142659ms step_avg:48.51ms +[2025-09-06 03:12:42] [Rank 0] step:2941/10000 train_time:142659ms step_avg:48.51ms +[2025-09-06 03:12:43] [Rank 0] step:2961/10000 train_time:143398ms step_avg:48.43ms +[2025-09-06 03:12:43] [Rank 0] step:2961/10000 train_time:143398ms step_avg:48.43ms +[2025-09-06 03:12:44] [Rank 0] step:2981/10000 train_time:144138ms step_avg:48.35ms +[2025-09-06 03:12:44] [Rank 0] step:2981/10000 train_time:144138ms step_avg:48.35ms +[2025-09-06 03:12:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size 
(65536). Some tokens might be missed. +[2025-09-06 03:12:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:12:45] [Rank 0] PRINT: step:3000/10000 train_loss:3.1488 val_loss:3.0790 train_time:144957ms step_avg:48.32ms +[2025-09-06 03:12:45] [Rank 0] PRINT: step:3000/10000 train_loss:3.1488 val_loss:3.0790 train_time:144957ms step_avg:48.32ms +[2025-09-06 03:12:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:12:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:12:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:12:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:14:07] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:14:07] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:14:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:14:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:14:07] [Rank 0] Total Loss: 5.1798 +[2025-09-06 03:14:07] [Rank 0] Total Loss: 5.1798 +[2025-09-06 03:14:07] [Rank 0] Total FTA (Unweighted): 0.1375 +[2025-09-06 03:14:07] [Rank 0] Total FTA (Unweighted): 0.1375 +[2025-09-06 03:14:07] [Rank 0] Total FTA (Weighted): 0.1375 +[2025-09-06 03:14:07] [Rank 0] Total FTA (Weighted): 0.1375 +[2025-09-06 03:14:07] [Rank 0] Group 0 Loss: 3.2615 +[2025-09-06 03:14:07] [Rank 0] Group 0 Loss: 3.2615 +[2025-09-06 03:14:07] [Rank 0] Group 1 Loss: 3.1691 +[2025-09-06 03:14:07] [Rank 0] Group 1 Loss: 3.1691 +[2025-09-06 03:14:07] [Rank 0] Group 2 Loss: 3.4426 +[2025-09-06 03:14:07] [Rank 0] Group 2 Loss: 3.4426 +[2025-09-06 03:14:07] [Rank 0] Group 3 Loss: 4.1019 +[2025-09-06 03:14:07] [Rank 0] Group 3 Loss: 4.1019 +[2025-09-06 03:14:07] [Rank 0] Group 4 Loss: 4.8929 +[2025-09-06 03:14:07] [Rank 0] Group 4 Loss: 4.8929 +[2025-09-06 03:14:07] [Rank 0] Group 5 Loss: 5.3078 +[2025-09-06 03:14:07] [Rank 0] Group 5 Loss: 5.3078 +[2025-09-06 03:14:07] [Rank 0] Group 6 Loss: 5.5838 +[2025-09-06 03:14:07] [Rank 0] Group 6 Loss: 5.5838 +[2025-09-06 03:14:07] [Rank 0] Group 7 Loss: 5.6077 +[2025-09-06 03:14:07] [Rank 0] Group 7 Loss: 5.6077 +[2025-09-06 03:14:07] [Rank 0] Group 8 Loss: 5.8461 +[2025-09-06 03:14:07] [Rank 0] Group 8 Loss: 5.8461 +[2025-09-06 03:14:07] [Rank 0] Group 9 Loss: 6.0017 +[2025-09-06 03:14:07] [Rank 0] Group 9 Loss: 6.0017 +[2025-09-06 03:14:07] [Rank 0] Group 10 Loss: 5.9659 +[2025-09-06 03:14:07] [Rank 0] Group 10 Loss: 5.9659 +[2025-09-06 03:14:07] [Rank 0] Group 11 Loss: 6.0360 +[2025-09-06 03:14:07] [Rank 0] Group 11 Loss: 6.0360 +[2025-09-06 03:14:07] [Rank 0] Group 12 Loss: 5.9063 +[2025-09-06 03:14:07] [Rank 0] Group 12 Loss: 5.9063 +[2025-09-06 03:14:07] [Rank 0] Group 13 Loss: 5.8894 +[2025-09-06 03:14:07] [Rank 0] Group 13 Loss: 5.8894 +[2025-09-06 03:14:08] [Rank 0] Group 14 Loss: 5.9665 +[2025-09-06 03:14:08] [Rank 0] Group 14 Loss: 5.9665 +[2025-09-06 03:14:08] [Rank 0] Group 15 Loss: 5.8971 +[2025-09-06 03:14:08] [Rank 0] Group 15 Loss: 5.8971 +[2025-09-06 03:14:08] [Rank 0] Group 0 FTA: 0.4000 +[2025-09-06 03:14:08] [Rank 0] Group 0 FTA: 0.4000 +[2025-09-06 03:14:08] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:14:08] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:14:08] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:14:08] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:14:08] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:14:08] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 
03:14:08] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 03:14:08] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 03:14:08] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 03:14:08] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 03:14:08] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 03:14:08] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 03:14:08] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:14:08] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:14:08] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-06 03:14:08] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-06 03:14:08] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 03:14:08] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 03:14:08] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 03:14:08] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 03:14:08] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 03:14:08] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 03:14:08] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 03:14:08] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 03:14:08] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 03:14:08] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 03:14:08] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:14:08] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:14:08] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 03:14:08] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 03:14:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:14:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:14:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:14:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:14:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:14:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:14:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:14:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:14:09] [Rank 0] step:3001/10000 train_time:144969ms step_avg:48.31ms +[2025-09-06 03:14:09] [Rank 0] step:3001/10000 train_time:144969ms step_avg:48.31ms +[2025-09-06 03:14:10] [Rank 0] step:3021/10000 train_time:145649ms step_avg:48.21ms +[2025-09-06 03:14:10] [Rank 0] step:3021/10000 train_time:145649ms step_avg:48.21ms +[2025-09-06 03:14:10] [Rank 0] step:3041/10000 train_time:146387ms step_avg:48.14ms +[2025-09-06 03:14:10] [Rank 0] step:3041/10000 train_time:146387ms step_avg:48.14ms +[2025-09-06 03:14:11] [Rank 0] step:3061/10000 train_time:147126ms step_avg:48.06ms +[2025-09-06 03:14:11] [Rank 0] step:3061/10000 train_time:147126ms step_avg:48.06ms +[2025-09-06 03:14:12] [Rank 0] step:3081/10000 train_time:147866ms step_avg:47.99ms +[2025-09-06 03:14:12] [Rank 0] step:3081/10000 train_time:147866ms step_avg:47.99ms +[2025-09-06 03:14:13] [Rank 0] step:3101/10000 train_time:148605ms 
step_avg:47.92ms +[2025-09-06 03:14:13] [Rank 0] step:3101/10000 train_time:148605ms step_avg:47.92ms +[2025-09-06 03:14:13] [Rank 0] step:3121/10000 train_time:149344ms step_avg:47.85ms +[2025-09-06 03:14:13] [Rank 0] step:3121/10000 train_time:149344ms step_avg:47.85ms +[2025-09-06 03:14:14] [Rank 0] step:3141/10000 train_time:150083ms step_avg:47.78ms +[2025-09-06 03:14:14] [Rank 0] step:3141/10000 train_time:150083ms step_avg:47.78ms +[2025-09-06 03:14:15] [Rank 0] step:3161/10000 train_time:150823ms step_avg:47.71ms +[2025-09-06 03:14:15] [Rank 0] step:3161/10000 train_time:150823ms step_avg:47.71ms +[2025-09-06 03:14:16] [Rank 0] step:3181/10000 train_time:151562ms step_avg:47.65ms +[2025-09-06 03:14:16] [Rank 0] step:3181/10000 train_time:151562ms step_avg:47.65ms +[2025-09-06 03:14:16] [Rank 0] step:3201/10000 train_time:152301ms step_avg:47.58ms +[2025-09-06 03:14:16] [Rank 0] step:3201/10000 train_time:152301ms step_avg:47.58ms +[2025-09-06 03:14:17] [Rank 0] step:3221/10000 train_time:153040ms step_avg:47.51ms +[2025-09-06 03:14:17] [Rank 0] step:3221/10000 train_time:153040ms step_avg:47.51ms +[2025-09-06 03:14:18] [Rank 0] step:3241/10000 train_time:153780ms step_avg:47.45ms +[2025-09-06 03:14:18] [Rank 0] step:3241/10000 train_time:153780ms step_avg:47.45ms +[2025-09-06 03:14:19] [Rank 0] step:3261/10000 train_time:154519ms step_avg:47.38ms +[2025-09-06 03:14:19] [Rank 0] step:3261/10000 train_time:154519ms step_avg:47.38ms +[2025-09-06 03:14:19] [Rank 0] step:3281/10000 train_time:155258ms step_avg:47.32ms +[2025-09-06 03:14:19] [Rank 0] step:3281/10000 train_time:155258ms step_avg:47.32ms +[2025-09-06 03:14:20] [Rank 0] step:3301/10000 train_time:155998ms step_avg:47.26ms +[2025-09-06 03:14:20] [Rank 0] step:3301/10000 train_time:155998ms step_avg:47.26ms +[2025-09-06 03:14:21] [Rank 0] step:3321/10000 train_time:156737ms step_avg:47.20ms +[2025-09-06 03:14:21] [Rank 0] step:3321/10000 train_time:156737ms step_avg:47.20ms +[2025-09-06 03:14:22] [Rank 0] step:3341/10000 train_time:157476ms step_avg:47.13ms +[2025-09-06 03:14:22] [Rank 0] step:3341/10000 train_time:157476ms step_avg:47.13ms +[2025-09-06 03:14:22] [Rank 0] step:3361/10000 train_time:158216ms step_avg:47.07ms +[2025-09-06 03:14:22] [Rank 0] step:3361/10000 train_time:158216ms step_avg:47.07ms +[2025-09-06 03:14:23] [Rank 0] step:3381/10000 train_time:158955ms step_avg:47.01ms +[2025-09-06 03:14:23] [Rank 0] step:3381/10000 train_time:158955ms step_avg:47.01ms +[2025-09-06 03:14:24] [Rank 0] step:3401/10000 train_time:159696ms step_avg:46.96ms +[2025-09-06 03:14:24] [Rank 0] step:3401/10000 train_time:159696ms step_avg:46.96ms +[2025-09-06 03:14:25] [Rank 0] step:3421/10000 train_time:160437ms step_avg:46.90ms +[2025-09-06 03:14:25] [Rank 0] step:3421/10000 train_time:160437ms step_avg:46.90ms +[2025-09-06 03:14:25] [Rank 0] step:3441/10000 train_time:161177ms step_avg:46.84ms +[2025-09-06 03:14:25] [Rank 0] step:3441/10000 train_time:161177ms step_avg:46.84ms +[2025-09-06 03:14:26] [Rank 0] step:3461/10000 train_time:161917ms step_avg:46.78ms +[2025-09-06 03:14:26] [Rank 0] step:3461/10000 train_time:161917ms step_avg:46.78ms +[2025-09-06 03:14:27] [Rank 0] step:3481/10000 train_time:162655ms step_avg:46.73ms +[2025-09-06 03:14:27] [Rank 0] step:3481/10000 train_time:162655ms step_avg:46.73ms +[2025-09-06 03:14:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 03:14:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:14:28] [Rank 0] PRINT: step:3500/10000 train_loss:3.0303 val_loss:2.9681 train_time:163474ms step_avg:46.71ms +[2025-09-06 03:14:28] [Rank 0] PRINT: step:3500/10000 train_loss:3.0303 val_loss:2.9681 train_time:163474ms step_avg:46.71ms +[2025-09-06 03:14:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:14:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:14:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:14:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:15:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:15:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:15:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:15:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:15:50] [Rank 0] Total Loss: 5.1285 +[2025-09-06 03:15:50] [Rank 0] Total Loss: 5.1285 +[2025-09-06 03:15:50] [Rank 0] Total FTA (Unweighted): 0.1544 +[2025-09-06 03:15:50] [Rank 0] Total FTA (Unweighted): 0.1544 +[2025-09-06 03:15:50] [Rank 0] Total FTA (Weighted): 0.1544 +[2025-09-06 03:15:50] [Rank 0] Total FTA (Weighted): 0.1544 +[2025-09-06 03:15:50] [Rank 0] Group 0 Loss: 3.2464 +[2025-09-06 03:15:50] [Rank 0] Group 0 Loss: 3.2464 +[2025-09-06 03:15:50] [Rank 0] Group 1 Loss: 3.2105 +[2025-09-06 03:15:50] [Rank 0] Group 1 Loss: 3.2105 +[2025-09-06 03:15:50] [Rank 0] Group 2 Loss: 3.3990 +[2025-09-06 03:15:50] [Rank 0] Group 2 Loss: 3.3990 +[2025-09-06 03:15:50] [Rank 0] Group 3 Loss: 4.0211 +[2025-09-06 03:15:50] [Rank 0] Group 3 Loss: 4.0211 +[2025-09-06 03:15:50] [Rank 0] Group 4 Loss: 4.7680 +[2025-09-06 03:15:50] [Rank 0] Group 4 Loss: 4.7680 +[2025-09-06 03:15:50] [Rank 0] Group 5 Loss: 5.2137 +[2025-09-06 03:15:50] [Rank 0] Group 5 Loss: 5.2137 +[2025-09-06 03:15:50] [Rank 0] Group 6 Loss: 5.5087 +[2025-09-06 03:15:50] [Rank 0] Group 6 Loss: 5.5087 +[2025-09-06 03:15:50] [Rank 0] Group 7 Loss: 5.5555 +[2025-09-06 03:15:50] [Rank 0] Group 7 Loss: 5.5555 +[2025-09-06 03:15:50] [Rank 0] Group 8 Loss: 5.7918 +[2025-09-06 03:15:50] [Rank 0] Group 8 Loss: 5.7918 +[2025-09-06 03:15:50] [Rank 0] Group 9 Loss: 5.9414 +[2025-09-06 03:15:50] [Rank 0] Group 9 Loss: 5.9414 +[2025-09-06 03:15:50] [Rank 0] Group 10 Loss: 5.9312 +[2025-09-06 03:15:50] [Rank 0] Group 10 Loss: 5.9312 +[2025-09-06 03:15:50] [Rank 0] Group 11 Loss: 5.9945 +[2025-09-06 03:15:50] [Rank 0] Group 11 Loss: 5.9945 +[2025-09-06 03:15:50] [Rank 0] Group 12 Loss: 5.8593 +[2025-09-06 03:15:50] [Rank 0] Group 12 Loss: 5.8593 +[2025-09-06 03:15:50] [Rank 0] Group 13 Loss: 5.8528 +[2025-09-06 03:15:50] [Rank 0] Group 13 Loss: 5.8528 +[2025-09-06 03:15:50] [Rank 0] Group 14 Loss: 5.9135 +[2025-09-06 03:15:50] [Rank 0] Group 14 Loss: 5.9135 +[2025-09-06 03:15:50] [Rank 0] Group 15 Loss: 5.8484 +[2025-09-06 03:15:50] [Rank 0] Group 15 Loss: 5.8484 +[2025-09-06 03:15:50] [Rank 0] Group 0 FTA: 0.6800 +[2025-09-06 03:15:50] [Rank 0] Group 0 FTA: 0.6800 +[2025-09-06 03:15:50] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:15:50] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:15:50] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:15:50] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:15:50] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 4 FTA: 0.0900 
+[2025-09-06 03:15:50] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 03:15:50] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 03:15:50] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 03:15:50] [Rank 0] Group 6 FTA: 0.0800 +[2025-09-06 03:15:50] [Rank 0] Group 6 FTA: 0.0800 +[2025-09-06 03:15:50] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:15:50] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:15:50] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 03:15:50] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 03:15:50] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 03:15:50] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 03:15:50] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 03:15:50] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 03:15:50] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 03:15:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:15:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:15:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:15:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:15:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:15:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:15:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:15:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:15:52] [Rank 0] step:3501/10000 train_time:163485ms step_avg:46.70ms +[2025-09-06 03:15:52] [Rank 0] step:3501/10000 train_time:163485ms step_avg:46.70ms +[2025-09-06 03:15:52] [Rank 0] step:3521/10000 train_time:164172ms step_avg:46.63ms +[2025-09-06 03:15:52] [Rank 0] step:3521/10000 train_time:164172ms step_avg:46.63ms +[2025-09-06 03:15:53] [Rank 0] step:3541/10000 train_time:164912ms step_avg:46.57ms +[2025-09-06 03:15:53] [Rank 0] step:3541/10000 train_time:164912ms step_avg:46.57ms +[2025-09-06 03:15:54] [Rank 0] step:3561/10000 train_time:165652ms step_avg:46.52ms +[2025-09-06 03:15:54] [Rank 0] step:3561/10000 train_time:165652ms step_avg:46.52ms +[2025-09-06 03:15:55] [Rank 0] step:3581/10000 train_time:166391ms step_avg:46.46ms +[2025-09-06 03:15:55] [Rank 0] step:3581/10000 train_time:166391ms step_avg:46.46ms +[2025-09-06 03:15:55] [Rank 0] step:3601/10000 train_time:167131ms step_avg:46.41ms +[2025-09-06 03:15:55] [Rank 0] 
step:3601/10000 train_time:167131ms step_avg:46.41ms +[2025-09-06 03:15:56] [Rank 0] step:3621/10000 train_time:167871ms step_avg:46.36ms +[2025-09-06 03:15:56] [Rank 0] step:3621/10000 train_time:167871ms step_avg:46.36ms +[2025-09-06 03:15:57] [Rank 0] step:3641/10000 train_time:168806ms step_avg:46.36ms +[2025-09-06 03:15:57] [Rank 0] step:3641/10000 train_time:168806ms step_avg:46.36ms +[2025-09-06 03:15:58] [Rank 0] step:3661/10000 train_time:169546ms step_avg:46.31ms +[2025-09-06 03:15:58] [Rank 0] step:3661/10000 train_time:169546ms step_avg:46.31ms +[2025-09-06 03:15:59] [Rank 0] step:3681/10000 train_time:170432ms step_avg:46.30ms +[2025-09-06 03:15:59] [Rank 0] step:3681/10000 train_time:170432ms step_avg:46.30ms +[2025-09-06 03:15:59] [Rank 0] step:3701/10000 train_time:171171ms step_avg:46.25ms +[2025-09-06 03:15:59] [Rank 0] step:3701/10000 train_time:171171ms step_avg:46.25ms +[2025-09-06 03:16:00] [Rank 0] step:3721/10000 train_time:171911ms step_avg:46.20ms +[2025-09-06 03:16:00] [Rank 0] step:3721/10000 train_time:171911ms step_avg:46.20ms +[2025-09-06 03:16:01] [Rank 0] step:3741/10000 train_time:172797ms step_avg:46.19ms +[2025-09-06 03:16:01] [Rank 0] step:3741/10000 train_time:172797ms step_avg:46.19ms +[2025-09-06 03:16:02] [Rank 0] step:3761/10000 train_time:173539ms step_avg:46.14ms +[2025-09-06 03:16:02] [Rank 0] step:3761/10000 train_time:173539ms step_avg:46.14ms +[2025-09-06 03:16:03] [Rank 0] step:3781/10000 train_time:174279ms step_avg:46.09ms +[2025-09-06 03:16:03] [Rank 0] step:3781/10000 train_time:174279ms step_avg:46.09ms +[2025-09-06 03:16:03] [Rank 0] step:3801/10000 train_time:175018ms step_avg:46.05ms +[2025-09-06 03:16:03] [Rank 0] step:3801/10000 train_time:175018ms step_avg:46.05ms +[2025-09-06 03:16:04] [Rank 0] step:3821/10000 train_time:175758ms step_avg:46.00ms +[2025-09-06 03:16:04] [Rank 0] step:3821/10000 train_time:175758ms step_avg:46.00ms +[2025-09-06 03:16:05] [Rank 0] step:3841/10000 train_time:176497ms step_avg:45.95ms +[2025-09-06 03:16:05] [Rank 0] step:3841/10000 train_time:176497ms step_avg:45.95ms +[2025-09-06 03:16:06] [Rank 0] step:3861/10000 train_time:177237ms step_avg:45.90ms +[2025-09-06 03:16:06] [Rank 0] step:3861/10000 train_time:177237ms step_avg:45.90ms +[2025-09-06 03:16:06] [Rank 0] step:3881/10000 train_time:177976ms step_avg:45.86ms +[2025-09-06 03:16:06] [Rank 0] step:3881/10000 train_time:177976ms step_avg:45.86ms +[2025-09-06 03:16:07] [Rank 0] step:3901/10000 train_time:178716ms step_avg:45.81ms +[2025-09-06 03:16:07] [Rank 0] step:3901/10000 train_time:178716ms step_avg:45.81ms +[2025-09-06 03:16:08] [Rank 0] step:3921/10000 train_time:179455ms step_avg:45.77ms +[2025-09-06 03:16:08] [Rank 0] step:3921/10000 train_time:179455ms step_avg:45.77ms +[2025-09-06 03:16:08] [Rank 0] step:3941/10000 train_time:180195ms step_avg:45.72ms +[2025-09-06 03:16:08] [Rank 0] step:3941/10000 train_time:180195ms step_avg:45.72ms +[2025-09-06 03:16:09] [Rank 0] step:3961/10000 train_time:180934ms step_avg:45.68ms +[2025-09-06 03:16:09] [Rank 0] step:3961/10000 train_time:180934ms step_avg:45.68ms +[2025-09-06 03:16:10] [Rank 0] step:3981/10000 train_time:181674ms step_avg:45.64ms +[2025-09-06 03:16:10] [Rank 0] step:3981/10000 train_time:181674ms step_avg:45.64ms +[2025-09-06 03:16:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:16:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). 
Some tokens might be missed. +[2025-09-06 03:16:11] [Rank 0] PRINT: step:4000/10000 train_loss:2.9340 val_loss:2.8847 train_time:182494ms step_avg:45.62ms +[2025-09-06 03:16:11] [Rank 0] PRINT: step:4000/10000 train_loss:2.9340 val_loss:2.8847 train_time:182494ms step_avg:45.62ms +[2025-09-06 03:16:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:16:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:16:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:16:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:17:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:17:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:17:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:17:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:17:33] [Rank 0] Total Loss: 5.0491 +[2025-09-06 03:17:33] [Rank 0] Total Loss: 5.0491 +[2025-09-06 03:17:33] [Rank 0] Total FTA (Unweighted): 0.1700 +[2025-09-06 03:17:33] [Rank 0] Total FTA (Unweighted): 0.1700 +[2025-09-06 03:17:33] [Rank 0] Total FTA (Weighted): 0.1700 +[2025-09-06 03:17:33] [Rank 0] Total FTA (Weighted): 0.1700 +[2025-09-06 03:17:33] [Rank 0] Group 0 Loss: 3.2552 +[2025-09-06 03:17:33] [Rank 0] Group 0 Loss: 3.2552 +[2025-09-06 03:17:33] [Rank 0] Group 1 Loss: 3.1488 +[2025-09-06 03:17:33] [Rank 0] Group 1 Loss: 3.1488 +[2025-09-06 03:17:33] [Rank 0] Group 2 Loss: 3.3750 +[2025-09-06 03:17:33] [Rank 0] Group 2 Loss: 3.3750 +[2025-09-06 03:17:33] [Rank 0] Group 3 Loss: 3.9488 +[2025-09-06 03:17:33] [Rank 0] Group 3 Loss: 3.9488 +[2025-09-06 03:17:33] [Rank 0] Group 4 Loss: 4.6435 +[2025-09-06 03:17:33] [Rank 0] Group 4 Loss: 4.6435 +[2025-09-06 03:17:33] [Rank 0] Group 5 Loss: 5.1045 +[2025-09-06 03:17:33] [Rank 0] Group 5 Loss: 5.1045 +[2025-09-06 03:17:33] [Rank 0] Group 6 Loss: 5.3857 +[2025-09-06 03:17:33] [Rank 0] Group 6 Loss: 5.3857 +[2025-09-06 03:17:33] [Rank 0] Group 7 Loss: 5.4528 +[2025-09-06 03:17:33] [Rank 0] Group 7 Loss: 5.4528 +[2025-09-06 03:17:33] [Rank 0] Group 8 Loss: 5.7139 +[2025-09-06 03:17:33] [Rank 0] Group 8 Loss: 5.7139 +[2025-09-06 03:17:33] [Rank 0] Group 9 Loss: 5.8501 +[2025-09-06 03:17:33] [Rank 0] Group 9 Loss: 5.8501 +[2025-09-06 03:17:33] [Rank 0] Group 10 Loss: 5.8342 +[2025-09-06 03:17:33] [Rank 0] Group 10 Loss: 5.8342 +[2025-09-06 03:17:33] [Rank 0] Group 11 Loss: 5.8987 +[2025-09-06 03:17:33] [Rank 0] Group 11 Loss: 5.8987 +[2025-09-06 03:17:33] [Rank 0] Group 12 Loss: 5.7781 +[2025-09-06 03:17:33] [Rank 0] Group 12 Loss: 5.7781 +[2025-09-06 03:17:33] [Rank 0] Group 13 Loss: 5.7806 +[2025-09-06 03:17:33] [Rank 0] Group 13 Loss: 5.7806 +[2025-09-06 03:17:33] [Rank 0] Group 14 Loss: 5.8495 +[2025-09-06 03:17:33] [Rank 0] Group 14 Loss: 5.8495 +[2025-09-06 03:17:33] [Rank 0] Group 15 Loss: 5.7659 +[2025-09-06 03:17:33] [Rank 0] Group 15 Loss: 5.7659 +[2025-09-06 03:17:33] [Rank 0] Group 0 FTA: 0.8000 +[2025-09-06 03:17:33] [Rank 0] Group 0 FTA: 0.8000 +[2025-09-06 03:17:33] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:17:33] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 03:17:33] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:17:33] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:17:33] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:17:33] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 03:17:33] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 03:17:33] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 03:17:33] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 
03:17:33] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 03:17:33] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 03:17:33] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 03:17:33] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:17:33] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 03:17:33] [Rank 0] Group 8 FTA: 0.1700 +[2025-09-06 03:17:33] [Rank 0] Group 8 FTA: 0.1700 +[2025-09-06 03:17:33] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 03:17:33] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 03:17:33] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 03:17:33] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 03:17:33] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 03:17:33] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 03:17:33] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 03:17:33] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 03:17:33] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 03:17:33] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-06 03:17:33] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:17:33] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 03:17:34] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 03:17:34] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 03:17:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:17:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png +[2025-09-06 03:17:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:17:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png +[2025-09-06 03:17:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:17:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png +[2025-09-06 03:17:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:17:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png +[2025-09-06 03:17:35] [Rank 0] step:4001/10000 train_time:182504ms step_avg:45.61ms +[2025-09-06 03:17:35] [Rank 0] step:4001/10000 train_time:182504ms step_avg:45.61ms +[2025-09-06 03:17:36] [Rank 0] step:4021/10000 train_time:183783ms step_avg:45.71ms +[2025-09-06 03:17:36] [Rank 0] step:4021/10000 train_time:183783ms step_avg:45.71ms +[2025-09-06 03:17:37] [Rank 0] step:4041/10000 train_time:184522ms step_avg:45.66ms +[2025-09-06 03:17:37] [Rank 0] step:4041/10000 train_time:184522ms step_avg:45.66ms +[2025-09-06 03:17:38] [Rank 0] step:4061/10000 train_time:185262ms step_avg:45.62ms +[2025-09-06 03:17:38] [Rank 0] step:4061/10000 train_time:185262ms step_avg:45.62ms +[2025-09-06 03:17:38] [Rank 0] step:4081/10000 train_time:186002ms step_avg:45.58ms +[2025-09-06 03:17:38] [Rank 0] step:4081/10000 train_time:186002ms step_avg:45.58ms +[2025-09-06 03:17:39] [Rank 0] step:4101/10000 train_time:186742ms step_avg:45.54ms +[2025-09-06 03:17:39] [Rank 0] step:4101/10000 train_time:186742ms step_avg:45.54ms +[2025-09-06 03:17:40] [Rank 0] step:4121/10000 train_time:187481ms 
step_avg:45.49ms +[2025-09-06 03:17:40] [Rank 0] step:4121/10000 train_time:187481ms step_avg:45.49ms +[2025-09-06 03:17:41] [Rank 0] step:4141/10000 train_time:188221ms step_avg:45.45ms +[2025-09-06 03:17:41] [Rank 0] step:4141/10000 train_time:188221ms step_avg:45.45ms +[2025-09-06 03:17:41] [Rank 0] step:4161/10000 train_time:188960ms step_avg:45.41ms +[2025-09-06 03:17:41] [Rank 0] step:4161/10000 train_time:188960ms step_avg:45.41ms +[2025-09-06 03:17:42] [Rank 0] step:4181/10000 train_time:189699ms step_avg:45.37ms +[2025-09-06 03:17:42] [Rank 0] step:4181/10000 train_time:189699ms step_avg:45.37ms +[2025-09-06 03:17:43] [Rank 0] step:4201/10000 train_time:190439ms step_avg:45.33ms +[2025-09-06 03:17:43] [Rank 0] step:4201/10000 train_time:190439ms step_avg:45.33ms +[2025-09-06 03:17:44] [Rank 0] step:4221/10000 train_time:191179ms step_avg:45.29ms +[2025-09-06 03:17:44] [Rank 0] step:4221/10000 train_time:191179ms step_avg:45.29ms +[2025-09-06 03:17:44] [Rank 0] step:4241/10000 train_time:191919ms step_avg:45.25ms +[2025-09-06 03:17:44] [Rank 0] step:4241/10000 train_time:191919ms step_avg:45.25ms +[2025-09-06 03:17:45] [Rank 0] step:4261/10000 train_time:192659ms step_avg:45.21ms +[2025-09-06 03:17:45] [Rank 0] step:4261/10000 train_time:192659ms step_avg:45.21ms +[2025-09-06 03:17:46] [Rank 0] step:4281/10000 train_time:193398ms step_avg:45.18ms +[2025-09-06 03:17:46] [Rank 0] step:4281/10000 train_time:193398ms step_avg:45.18ms +[2025-09-06 03:17:47] [Rank 0] step:4301/10000 train_time:194137ms step_avg:45.14ms +[2025-09-06 03:17:47] [Rank 0] step:4301/10000 train_time:194137ms step_avg:45.14ms +[2025-09-06 03:17:47] [Rank 0] step:4321/10000 train_time:194877ms step_avg:45.10ms +[2025-09-06 03:17:47] [Rank 0] step:4321/10000 train_time:194877ms step_avg:45.10ms +[2025-09-06 03:17:48] [Rank 0] step:4341/10000 train_time:195615ms step_avg:45.06ms +[2025-09-06 03:17:48] [Rank 0] step:4341/10000 train_time:195615ms step_avg:45.06ms +[2025-09-06 03:17:49] [Rank 0] step:4361/10000 train_time:196354ms step_avg:45.03ms +[2025-09-06 03:17:49] [Rank 0] step:4361/10000 train_time:196354ms step_avg:45.03ms +[2025-09-06 03:17:50] [Rank 0] step:4381/10000 train_time:197093ms step_avg:44.99ms +[2025-09-06 03:17:50] [Rank 0] step:4381/10000 train_time:197093ms step_avg:44.99ms +[2025-09-06 03:17:50] [Rank 0] step:4401/10000 train_time:197832ms step_avg:44.95ms +[2025-09-06 03:17:50] [Rank 0] step:4401/10000 train_time:197832ms step_avg:44.95ms +[2025-09-06 03:17:51] [Rank 0] step:4421/10000 train_time:198572ms step_avg:44.92ms +[2025-09-06 03:17:51] [Rank 0] step:4421/10000 train_time:198572ms step_avg:44.92ms +[2025-09-06 03:17:52] [Rank 0] step:4441/10000 train_time:199311ms step_avg:44.88ms +[2025-09-06 03:17:52] [Rank 0] step:4441/10000 train_time:199311ms step_avg:44.88ms +[2025-09-06 03:17:53] [Rank 0] step:4461/10000 train_time:200051ms step_avg:44.84ms +[2025-09-06 03:17:53] [Rank 0] step:4461/10000 train_time:200051ms step_avg:44.84ms +[2025-09-06 03:17:53] [Rank 0] step:4481/10000 train_time:200790ms step_avg:44.81ms +[2025-09-06 03:17:53] [Rank 0] step:4481/10000 train_time:200790ms step_avg:44.81ms +[2025-09-06 03:17:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:17:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
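[Editor's note] The warning above recurs before every 500-step validation pass, and its arithmetic can be checked from the two numbers it quotes. The sketch below is illustrative only, not code from the logged training script; it assumes the validation loop runs floor(val_tokens / val_batch_size) full batches and drops the remainder, which is what "Some tokens might be missed" implies.

# Illustrative sketch; the variable names mirror the warning text, not the script.
val_tokens = 491520        # "val_tokens" from the run's hyperparameters
val_batch_size = 65536     # batch size quoted in the warning

full_batches, leftover = divmod(val_tokens, val_batch_size)
print(full_batches)                    # 7 full validation batches
print(full_batches * val_batch_size)   # 458752 tokens actually evaluated
print(leftover)                        # 32768 tokens skipped per validation pass

# The step_avg field is cumulative: train_time divided by the step index.
# E.g. the step:4481 line above reports 200790 ms over 4481 steps:
print(round(200790 / 4481, 2))         # 44.81, matching step_avg:44.81ms

Rounding val_tokens to a multiple of val_batch_size (458752 or 524288) would silence the warning and make the validation pass cover every token.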
+[2025-09-06 03:17:54] [Rank 0] PRINT: step:4500/10000 train_loss:2.8604 val_loss:2.8193 train_time:201611ms step_avg:44.80ms
+[2025-09-06 03:17:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:17:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:19:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:19:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:19:16] [Rank 0] Total Loss: 5.0380
+[2025-09-06 03:19:16] [Rank 0] Total FTA (Unweighted): 0.1694
+[2025-09-06 03:19:16] [Rank 0] Total FTA (Weighted): 0.1694
+[2025-09-06 03:19:16] [Rank 0] Group 0 Loss: 3.2863
+[2025-09-06 03:19:16] [Rank 0] Group 1 Loss: 3.2110
+[2025-09-06 03:19:16] [Rank 0] Group 2 Loss: 3.3888
+[2025-09-06 03:19:16] [Rank 0] Group 3 Loss: 3.9538
+[2025-09-06 03:19:16] [Rank 0] Group 4 Loss: 4.6077
+[2025-09-06 03:19:16] [Rank 0] Group 5 Loss: 5.0484
+[2025-09-06 03:19:16] [Rank 0] Group 6 Loss: 5.3629
+[2025-09-06 03:19:16] [Rank 0] Group 7 Loss: 5.4287
+[2025-09-06 03:19:16] [Rank 0] Group 8 Loss: 5.6898
+[2025-09-06 03:19:16] [Rank 0] Group 9 Loss: 5.8363
+[2025-09-06 03:19:16] [Rank 0] Group 10 Loss: 5.8255
+[2025-09-06 03:19:16] [Rank 0] Group 11 Loss: 5.8824
+[2025-09-06 03:19:16] [Rank 0] Group 12 Loss: 5.7604
+[2025-09-06 03:19:16] [Rank 0] Group 13 Loss: 5.7449
+[2025-09-06 03:19:16] [Rank 0] Group 14 Loss: 5.8272
+[2025-09-06 03:19:16] [Rank 0] Group 15 Loss: 5.7531
+[2025-09-06 03:19:16] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 03:19:16] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:19:16] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:19:16] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:19:16] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 03:19:16] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:19:16] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 03:19:16] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:19:16] [Rank 0] Group 8 FTA: 0.1600
+[2025-09-06 03:19:16] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:19:16] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:19:16] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:19:16] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:19:16] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 03:19:16] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:19:16] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 03:19:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:19:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:19:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:19:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:19:17] [Rank 0] step:4501/10000 train_time:201622ms step_avg:44.79ms
+[2025-09-06 03:19:18] [Rank 0] step:4521/10000 train_time:202299ms step_avg:44.75ms
+[2025-09-06 03:19:19] [Rank 0] step:4541/10000 train_time:203038ms step_avg:44.71ms
+[2025-09-06 03:19:20] [Rank 0] step:4561/10000 train_time:203776ms step_avg:44.68ms
+[2025-09-06 03:19:20] [Rank 0] step:4581/10000 train_time:204517ms step_avg:44.64ms
+[2025-09-06 03:19:21] [Rank 0] step:4601/10000 train_time:205257ms step_avg:44.61ms
+[2025-09-06 03:19:22] [Rank 0] step:4621/10000 train_time:205996ms step_avg:44.58ms
+[2025-09-06 03:19:23] [Rank 0] step:4641/10000 train_time:206736ms step_avg:44.55ms
+[2025-09-06 03:19:23] [Rank 0] step:4661/10000 train_time:207476ms step_avg:44.51ms
+[2025-09-06 03:19:24] [Rank 0] step:4681/10000 train_time:208215ms step_avg:44.48ms
+[2025-09-06 03:19:25] [Rank 0] step:4701/10000 train_time:208955ms step_avg:44.45ms
+[2025-09-06 03:19:26] [Rank 0] step:4721/10000 train_time:209695ms step_avg:44.42ms
+[2025-09-06 03:19:26] [Rank 0] step:4741/10000 train_time:210435ms step_avg:44.39ms
+[2025-09-06 03:19:27] [Rank 0] step:4761/10000 train_time:211175ms step_avg:44.36ms
+[2025-09-06 03:19:28] [Rank 0] step:4781/10000 train_time:211914ms step_avg:44.32ms
+[2025-09-06 03:19:28] [Rank 0] step:4801/10000 train_time:212652ms step_avg:44.29ms
+[2025-09-06 03:19:29] [Rank 0] step:4821/10000 train_time:213392ms step_avg:44.26ms
+[2025-09-06 03:19:30] [Rank 0] step:4841/10000 train_time:214436ms step_avg:44.30ms
+[2025-09-06 03:19:31] [Rank 0] step:4861/10000 train_time:215176ms step_avg:44.27ms
+[2025-09-06 03:19:32] [Rank 0] step:4881/10000 train_time:215915ms step_avg:44.24ms
+[2025-09-06 03:19:32] [Rank 0] step:4901/10000 train_time:216654ms step_avg:44.21ms
+[2025-09-06 03:19:33] [Rank 0] step:4921/10000 train_time:217394ms step_avg:44.18ms
+[2025-09-06 03:19:34] [Rank 0] step:4941/10000 train_time:218134ms step_avg:44.15ms
+[2025-09-06 03:19:35] [Rank 0] step:4961/10000 train_time:218873ms step_avg:44.12ms
+[2025-09-06 03:19:35] [Rank 0] step:4981/10000 train_time:219612ms step_avg:44.09ms
+[2025-09-06 03:19:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:19:37] [Rank 0] PRINT: step:5000/10000 train_loss:2.7997 val_loss:2.7654 train_time:220432ms step_avg:44.09ms
+[2025-09-06 03:19:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:19:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:20:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:20:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:20:59] [Rank 0] Total Loss: 4.9932
+[2025-09-06 03:20:59] [Rank 0] Total FTA (Unweighted): 0.1700
+[2025-09-06 03:20:59] [Rank 0] Total FTA (Weighted): 0.1700
+[2025-09-06 03:20:59] [Rank 0] Group 0 Loss: 3.3057
+[2025-09-06 03:20:59] [Rank 0] Group 1 Loss: 3.1434
+[2025-09-06 03:20:59] [Rank 0] Group 2 Loss: 3.3795
+[2025-09-06 03:20:59] [Rank 0] Group 3 Loss: 3.9154
+[2025-09-06 03:20:59] [Rank 0] Group 4 Loss: 4.5391
+[2025-09-06 03:20:59] [Rank 0] Group 5 Loss: 4.9859
+[2025-09-06 03:20:59] [Rank 0] Group 6 Loss: 5.2937
+[2025-09-06 03:20:59] [Rank 0] Group 7 Loss: 5.3750
+[2025-09-06 03:20:59] [Rank 0] Group 8 Loss: 5.6523
+[2025-09-06 03:20:59] [Rank 0] Group 9 Loss: 5.7869
+[2025-09-06 03:20:59] [Rank 0] Group 10 Loss: 5.7681
+[2025-09-06 03:20:59] [Rank 0] Group 11 Loss: 5.8396
+[2025-09-06 03:20:59] [Rank 0] Group 12 Loss: 5.7171
+[2025-09-06 03:20:59] [Rank 0] Group 13 Loss: 5.7042
+[2025-09-06 03:20:59] [Rank 0] Group 14 Loss: 5.7863
+[2025-09-06 03:20:59] [Rank 0] Group 15 Loss: 5.6998
+[2025-09-06 03:20:59] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 03:20:59] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:20:59] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:20:59] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:20:59] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 03:20:59] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:20:59] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 03:20:59] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:20:59] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-06 03:20:59] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:20:59] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:20:59] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:20:59] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:20:59] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 03:20:59] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:20:59] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 03:20:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:21:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:21:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:21:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:21:00] [Rank 0] step:5001/10000 train_time:220443ms step_avg:44.08ms
+[2025-09-06 03:21:01] [Rank 0] step:5021/10000 train_time:221109ms step_avg:44.04ms
+[2025-09-06 03:21:02] [Rank 0] step:5041/10000 train_time:221849ms step_avg:44.01ms
+[2025-09-06 03:21:03] [Rank 0] step:5061/10000 train_time:222589ms step_avg:43.98ms
+[2025-09-06 03:21:03] [Rank 0] step:5081/10000 train_time:223328ms step_avg:43.95ms
+[2025-09-06 03:21:04] [Rank 0] step:5101/10000 train_time:224068ms step_avg:43.93ms
+[2025-09-06 03:21:05] [Rank 0] step:5121/10000 train_time:224808ms step_avg:43.90ms
+[2025-09-06 03:21:06] [Rank 0] step:5141/10000 train_time:225548ms step_avg:43.87ms
+[2025-09-06 03:21:06] [Rank 0] step:5161/10000 train_time:226288ms step_avg:43.85ms
+[2025-09-06 03:21:07] [Rank 0] step:5181/10000 train_time:227028ms step_avg:43.82ms
+[2025-09-06 03:21:08] [Rank 0] step:5201/10000 train_time:227767ms step_avg:43.79ms
+[2025-09-06 03:21:09] [Rank 0] step:5221/10000 train_time:228506ms step_avg:43.77ms
+[2025-09-06 03:21:09] [Rank 0] step:5241/10000 train_time:229247ms step_avg:43.74ms
+[2025-09-06 03:21:10] [Rank 0] step:5261/10000 train_time:229986ms step_avg:43.72ms
+[2025-09-06 03:21:11] [Rank 0] step:5281/10000 train_time:230725ms step_avg:43.69ms
+[2025-09-06 03:21:11] [Rank 0] step:5301/10000 train_time:231464ms step_avg:43.66ms
+[2025-09-06 03:21:12] [Rank 0] step:5321/10000 train_time:232203ms step_avg:43.64ms
+[2025-09-06 03:21:13] [Rank 0] step:5341/10000 train_time:232942ms step_avg:43.61ms
+[2025-09-06 03:21:14] [Rank 0] step:5361/10000 train_time:233682ms step_avg:43.59ms
+[2025-09-06 03:21:15] [Rank 0] step:5381/10000 train_time:234569ms step_avg:43.59ms
+[2025-09-06 03:21:15] [Rank 0] step:5401/10000 train_time:235310ms step_avg:43.57ms
+[2025-09-06 03:21:16] [Rank 0] step:5421/10000 train_time:236050ms step_avg:43.54ms
+[2025-09-06 03:21:17] [Rank 0] step:5441/10000 train_time:236920ms step_avg:43.54ms
+[2025-09-06 03:21:18] [Rank 0] step:5461/10000 train_time:237659ms step_avg:43.52ms
+[2025-09-06 03:21:18] [Rank 0] step:5481/10000 train_time:238397ms step_avg:43.50ms
+[2025-09-06 03:21:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:21:20] [Rank 0] PRINT: step:5500/10000 train_loss:2.7500 val_loss:2.7195 train_time:239216ms step_avg:43.49ms
+[2025-09-06 03:21:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:21:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:22:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:22:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:22:41] [Rank 0] Total Loss: 4.9588
+[2025-09-06 03:22:41] [Rank 0] Total FTA (Unweighted): 0.1719
+[2025-09-06 03:22:41] [Rank 0] Total FTA (Weighted): 0.1719
+[2025-09-06 03:22:41] [Rank 0] Group 0 Loss: 3.2728
+[2025-09-06 03:22:41] [Rank 0] Group 1 Loss: 3.1457
+[2025-09-06 03:22:41] [Rank 0] Group 2 Loss: 3.3327
+[2025-09-06 03:22:41] [Rank 0] Group 3 Loss: 3.8696
+[2025-09-06 03:22:41] [Rank 0] Group 4 Loss: 4.4703
+[2025-09-06 03:22:41] [Rank 0] Group 5 Loss: 4.9571
+[2025-09-06 03:22:41] [Rank 0] Group 6 Loss: 5.2646
+[2025-09-06 03:22:41] [Rank 0] Group 7 Loss: 5.3522
+[2025-09-06 03:22:41] [Rank 0] Group 8 Loss: 5.6064
+[2025-09-06 03:22:41] [Rank 0] Group 9 Loss: 5.7561
+[2025-09-06 03:22:41] [Rank 0] Group 10 Loss: 5.7407
+[2025-09-06 03:22:41] [Rank 0] Group 11 Loss: 5.8001
+[2025-09-06 03:22:41] [Rank 0] Group 12 Loss: 5.6794
+[2025-09-06 03:22:41] [Rank 0] Group 13 Loss: 5.6743
+[2025-09-06 03:22:41] [Rank 0] Group 14 Loss: 5.7478
+[2025-09-06 03:22:41] [Rank 0] Group 15 Loss: 5.6712
+[2025-09-06 03:22:41] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 03:22:41] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:22:41] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:22:41] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:22:41] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 03:22:41] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:22:41] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 03:22:41] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:22:41] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-06 03:22:41] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:22:41] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:22:41] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:22:41] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:22:41] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 03:22:41] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:22:41] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 03:22:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:22:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:22:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:22:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:22:43] [Rank 0] step:5501/10000 train_time:239227ms step_avg:43.49ms
+[2025-09-06 03:22:44] [Rank 0] step:5521/10000 train_time:239911ms step_avg:43.45ms
+[2025-09-06 03:22:44] [Rank 0] step:5541/10000 train_time:240651ms step_avg:43.43ms
+[2025-09-06 03:22:45] [Rank 0] step:5561/10000 train_time:241388ms step_avg:43.41ms
+[2025-09-06 03:22:46] [Rank 0] step:5581/10000 train_time:242127ms step_avg:43.38ms
+[2025-09-06 03:22:46] [Rank 0] step:5601/10000 train_time:242866ms step_avg:43.36ms
+[2025-09-06 03:22:47] [Rank 0] step:5621/10000 train_time:243604ms step_avg:43.34ms
+[2025-09-06 03:22:49] [Rank 0] step:5641/10000 train_time:244939ms step_avg:43.42ms
+[2025-09-06 03:22:49] [Rank 0] step:5661/10000 train_time:245680ms step_avg:43.40ms
+[2025-09-06 03:22:50] [Rank 0] step:5681/10000 train_time:246419ms step_avg:43.38ms
+[2025-09-06 03:22:51] [Rank 0] step:5701/10000 train_time:247158ms step_avg:43.35ms
+[2025-09-06 03:22:52] [Rank 0] step:5721/10000 train_time:247898ms step_avg:43.33ms
+[2025-09-06 03:22:52] [Rank 0] step:5741/10000 train_time:248636ms step_avg:43.31ms
+[2025-09-06 03:22:53] [Rank 0] step:5761/10000 train_time:249375ms step_avg:43.29ms
+[2025-09-06 03:22:54] [Rank 0] step:5781/10000 train_time:250115ms step_avg:43.26ms
+[2025-09-06 03:22:54] [Rank 0] step:5801/10000 train_time:250854ms step_avg:43.24ms
+[2025-09-06 03:22:55] [Rank 0] step:5821/10000 train_time:251593ms step_avg:43.22ms
+[2025-09-06 03:22:56] [Rank 0] step:5841/10000 train_time:252332ms step_avg:43.20ms
+[2025-09-06 03:22:57] [Rank 0] step:5861/10000 train_time:253071ms step_avg:43.18ms
+[2025-09-06 03:22:57] [Rank 0] step:5881/10000 train_time:253811ms step_avg:43.16ms
+[2025-09-06 03:22:58] [Rank 0] step:5901/10000 train_time:254550ms step_avg:43.14ms
+[2025-09-06 03:22:59] [Rank 0] step:5921/10000 train_time:255289ms step_avg:43.12ms
+[2025-09-06 03:23:00] [Rank 0] step:5941/10000 train_time:256028ms step_avg:43.10ms
+[2025-09-06 03:23:00] [Rank 0] step:5961/10000 train_time:256767ms step_avg:43.07ms
+[2025-09-06 03:23:01] [Rank 0] step:5981/10000 train_time:257507ms step_avg:43.05ms
+[2025-09-06 03:23:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:23:02] [Rank 0] PRINT: step:6000/10000 train_loss:2.7083 val_loss:2.6811 train_time:258327ms step_avg:43.05ms
+[2025-09-06 03:23:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:23:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:24:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:24:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:24:24] [Rank 0] Total Loss: 4.9627
+[2025-09-06 03:24:24] [Rank 0] Total FTA (Unweighted): 0.1694
+[2025-09-06 03:24:24] [Rank 0] Total FTA (Weighted): 0.1694
+[2025-09-06 03:24:24] [Rank 0] Group 0 Loss: 3.2628
+[2025-09-06 03:24:24] [Rank 0] Group 1 Loss: 3.1675
+[2025-09-06 03:24:24] [Rank 0] Group 2 Loss: 3.3110
+[2025-09-06 03:24:24] [Rank 0] Group 3 Loss: 3.8930
+[2025-09-06 03:24:24] [Rank 0] Group 4 Loss: 4.4622
+[2025-09-06 03:24:24] [Rank 0] Group 5 Loss: 4.9439
+[2025-09-06 03:24:24] [Rank 0] Group 6 Loss: 5.2644
+[2025-09-06 03:24:24] [Rank 0] Group 7 Loss: 5.3622
+[2025-09-06 03:24:24] [Rank 0] Group 8 Loss: 5.6076
+[2025-09-06 03:24:24] [Rank 0] Group 9 Loss: 5.7612
+[2025-09-06 03:24:24] [Rank 0] Group 10 Loss: 5.7566
+[2025-09-06 03:24:24] [Rank 0] Group 11 Loss: 5.8190
+[2025-09-06 03:24:24] [Rank 0] Group 12 Loss: 5.6833
+[2025-09-06 03:24:24] [Rank 0] Group 13 Loss: 5.6784
+[2025-09-06 03:24:24] [Rank 0] Group 14 Loss: 5.7483
+[2025-09-06 03:24:24] [Rank 0] Group 15 Loss: 5.6822
+[2025-09-06 03:24:24] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 03:24:24] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:24:24] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:24:24] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:24:24] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 03:24:24] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:24:24] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 03:24:24] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:24:24] [Rank 0] Group 8 FTA: 0.1800
+[2025-09-06 03:24:24] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:24:24] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:24:24] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:24:24] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:24:24] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 03:24:24] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:24:24] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 03:24:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:24:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:24:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:24:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:24:25] [Rank 0] step:6001/10000 train_time:258339ms step_avg:43.05ms
+[2025-09-06 03:24:26] [Rank 0] step:6021/10000 train_time:259207ms step_avg:43.05ms
+[2025-09-06 03:24:27] [Rank 0] step:6041/10000 train_time:259947ms step_avg:43.03ms
+[2025-09-06 03:24:28] [Rank 0] step:6061/10000 train_time:260913ms step_avg:43.05ms
+[2025-09-06 03:24:29] [Rank 0] step:6081/10000 train_time:261653ms step_avg:43.03ms
+[2025-09-06 03:24:29] [Rank 0] step:6101/10000 train_time:262393ms step_avg:43.01ms
+[2025-09-06 03:24:30] [Rank 0] step:6121/10000 train_time:263133ms step_avg:42.99ms
+[2025-09-06 03:24:31] [Rank 0] step:6141/10000 train_time:263873ms step_avg:42.97ms
+[2025-09-06 03:24:32] [Rank 0] step:6161/10000 train_time:264612ms step_avg:42.95ms
+[2025-09-06 03:24:32] [Rank 0] step:6181/10000 train_time:265352ms step_avg:42.93ms
+[2025-09-06 03:24:33] [Rank 0] step:6201/10000 train_time:266092ms step_avg:42.91ms
+[2025-09-06 03:24:34] [Rank 0] step:6221/10000 train_time:266830ms step_avg:42.89ms
+[2025-09-06 03:24:35] [Rank 0] step:6241/10000 train_time:267571ms step_avg:42.87ms
+[2025-09-06 03:24:35] [Rank 0] step:6261/10000 train_time:268311ms step_avg:42.85ms
+[2025-09-06 03:24:36] [Rank 0] step:6281/10000 train_time:269050ms step_avg:42.84ms
+[2025-09-06 03:24:37] [Rank 0] step:6301/10000 train_time:269788ms step_avg:42.82ms
+[2025-09-06 03:24:37] [Rank 0] step:6321/10000 train_time:270526ms step_avg:42.80ms
+[2025-09-06 03:24:38] [Rank 0] step:6341/10000 train_time:271265ms step_avg:42.78ms
+[2025-09-06 03:24:39] [Rank 0] step:6361/10000 train_time:272004ms step_avg:42.76ms
+[2025-09-06 03:24:40] [Rank 0] step:6381/10000 train_time:272755ms step_avg:42.74ms
+[2025-09-06 03:24:40] [Rank 0] step:6401/10000 train_time:273494ms step_avg:42.73ms
+[2025-09-06 03:24:41] [Rank 0] step:6421/10000 train_time:274233ms step_avg:42.71ms
+[2025-09-06 03:24:42] [Rank 0] step:6441/10000 train_time:274972ms step_avg:42.69ms
+[2025-09-06 03:24:43] [Rank 0] step:6461/10000 train_time:275712ms step_avg:42.67ms
+[2025-09-06 03:24:43] [Rank 0] step:6481/10000 train_time:276451ms step_avg:42.66ms
+[2025-09-06 03:24:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:24:45] [Rank 0] PRINT: step:6500/10000 train_loss:2.6746 val_loss:2.6499 train_time:277272ms step_avg:42.66ms
+[2025-09-06 03:24:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:24:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:26:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:26:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:26:07] [Rank 0] Total Loss: 4.9112
+[2025-09-06 03:26:07] [Rank 0] Total FTA (Unweighted): 0.2075
+[2025-09-06 03:26:07] [Rank 0] Total FTA (Weighted): 0.2075
+[2025-09-06 03:26:07] [Rank 0] Group 0 Loss: 3.2972
+[2025-09-06 03:26:07] [Rank 0] Group 1 Loss: 3.1267
+[2025-09-06 03:26:07] [Rank 0] Group 2 Loss: 3.3119
+[2025-09-06 03:26:07] [Rank 0] Group 3 Loss: 3.8549
+[2025-09-06 03:26:07] [Rank 0] Group 4 Loss: 4.4143
+[2025-09-06 03:26:07] [Rank 0] Group 5 Loss: 4.8612
+[2025-09-06 03:26:07] [Rank 0] Group 6 Loss: 5.1972
+[2025-09-06 03:26:07] [Rank 0] Group 7 Loss: 5.2781
+[2025-09-06 03:26:07] [Rank 0] Group 8 Loss: 5.5479
+[2025-09-06 03:26:07] [Rank 0] Group 9 Loss: 5.6932
+[2025-09-06 03:26:07] [Rank 0] Group 10 Loss: 5.6866
+[2025-09-06 03:26:07] [Rank 0] Group 11 Loss: 5.7308
+[2025-09-06 03:26:07] [Rank 0] Group 12 Loss: 5.6211
+[2025-09-06 03:26:07] [Rank 0] Group 13 Loss: 5.6315
+[2025-09-06 03:26:07] [Rank 0] Group 14 Loss: 5.7014
+[2025-09-06 03:26:07] [Rank 0] Group 15 Loss: 5.6248
+[2025-09-06 03:26:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:26:07] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 03:26:07] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:26:07] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:26:07] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 03:26:07] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:26:07] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 03:26:07] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:26:07] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 03:26:07] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:26:07] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:26:07] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:26:07] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:26:07] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 03:26:07] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:26:07] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 03:26:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:26:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:26:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:26:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:26:09] [Rank 0] step:6501/10000 train_time:277282ms step_avg:42.65ms
+[2025-09-06 03:26:09] [Rank 0] step:6521/10000 train_time:277958ms step_avg:42.63ms
+[2025-09-06 03:26:10] [Rank 0] step:6541/10000 train_time:278698ms step_avg:42.61ms
+[2025-09-06 03:26:11] [Rank 0] step:6561/10000 train_time:279438ms step_avg:42.59ms
+[2025-09-06 03:26:12] [Rank 0] step:6581/10000 train_time:280176ms step_avg:42.57ms
+[2025-09-06 03:26:12] [Rank 0] step:6601/10000 train_time:280916ms step_avg:42.56ms
+[2025-09-06 03:26:13] [Rank 0] step:6621/10000 train_time:281655ms step_avg:42.54ms
+[2025-09-06 03:26:14] [Rank 0] step:6641/10000 train_time:282395ms step_avg:42.52ms
+[2025-09-06 03:26:15] [Rank 0] step:6661/10000 train_time:283135ms step_avg:42.51ms
+[2025-09-06 03:26:15] [Rank 0] step:6681/10000 train_time:283874ms step_avg:42.49ms
+[2025-09-06 03:26:16] [Rank 0] step:6701/10000 train_time:284614ms step_avg:42.47ms
+[2025-09-06 03:26:17] [Rank 0] step:6721/10000 train_time:285354ms step_avg:42.46ms
+[2025-09-06 03:26:18] [Rank 0] step:6741/10000 train_time:286094ms step_avg:42.44ms
+[2025-09-06 03:26:18] [Rank 0] step:6761/10000 train_time:286834ms step_avg:42.42ms
+[2025-09-06 03:26:19] [Rank 0] step:6781/10000 train_time:287572ms step_avg:42.41ms
+[2025-09-06 03:26:20] [Rank 0] step:6801/10000 train_time:288312ms step_avg:42.39ms
+[2025-09-06 03:26:21] [Rank 0] step:6821/10000 train_time:289051ms step_avg:42.38ms
+[2025-09-06 03:26:22] [Rank 0] step:6841/10000 train_time:290415ms step_avg:42.45ms
+[2025-09-06 03:26:23] [Rank 0] step:6861/10000 train_time:291154ms step_avg:42.44ms
+[2025-09-06 03:26:23] [Rank 0] step:6881/10000 train_time:291894ms step_avg:42.42ms
+[2025-09-06 03:26:24] [Rank 0] step:6901/10000 train_time:292633ms step_avg:42.40ms
+[2025-09-06 03:26:25] [Rank 0] step:6921/10000 train_time:293375ms step_avg:42.39ms
+[2025-09-06 03:26:26] [Rank 0] step:6941/10000 train_time:294114ms step_avg:42.37ms
+[2025-09-06 03:26:26] [Rank 0] step:6961/10000 train_time:294853ms step_avg:42.36ms
+[2025-09-06 03:26:27] [Rank 0] step:6981/10000 train_time:295593ms step_avg:42.34ms
+[2025-09-06 03:26:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:26:28] [Rank 0] PRINT: step:7000/10000 train_loss:2.6443 val_loss:2.6230 train_time:296411ms step_avg:42.34ms
+[2025-09-06 03:26:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:26:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:27:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:27:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:27:50] [Rank 0] Total Loss: 4.8933
+[2025-09-06 03:27:50] [Rank 0] Total FTA (Unweighted): 0.2062
+[2025-09-06 03:27:50] [Rank 0] Total FTA (Weighted): 0.2062
+[2025-09-06 03:27:50] [Rank 0] Group 0 Loss: 3.3107
+[2025-09-06 03:27:50] [Rank 0] Group 1 Loss: 3.1208
+[2025-09-06 03:27:50] [Rank 0] Group 2 Loss: 3.3190
+[2025-09-06 03:27:50] [Rank 0] Group 3 Loss: 3.8577
+[2025-09-06 03:27:50] [Rank 0] Group 4 Loss: 4.3845
+[2025-09-06 03:27:50] [Rank 0] Group 5 Loss: 4.8437
+[2025-09-06 03:27:50] [Rank 0] Group 6 Loss: 5.1606
+[2025-09-06 03:27:50] [Rank 0] Group 7 Loss: 5.2425
+[2025-09-06 03:27:50] [Rank 0] Group 8 Loss: 5.5207
+[2025-09-06 03:27:50] [Rank 0] Group 9 Loss: 5.6700
+[2025-09-06 03:27:50] [Rank 0] Group 10 Loss: 5.6645
+[2025-09-06 03:27:50] [Rank 0] Group 11 Loss: 5.7027
+[2025-09-06 03:27:50] [Rank 0] Group 12 Loss: 5.6121
+[2025-09-06 03:27:50] [Rank 0] Group 13 Loss: 5.6098
+[2025-09-06 03:27:50] [Rank 0] Group 14 Loss: 5.6829
+[2025-09-06 03:27:50] [Rank 0] Group 15 Loss: 5.5905
+[2025-09-06 03:27:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:27:50] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 03:27:50] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:27:50] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:27:50] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 03:27:50] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:27:50] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 03:27:50] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:27:50] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 03:27:50] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:27:50] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:27:50] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:27:50] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:27:50] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 03:27:50] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:27:50] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 03:27:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:27:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:27:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:27:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:27:51] [Rank 0] step:7001/10000 train_time:296422ms step_avg:42.34ms
+[2025-09-06 03:27:52] [Rank 0] step:7021/10000 train_time:297085ms step_avg:42.31ms
+[2025-09-06 03:27:53] [Rank 0] step:7041/10000 train_time:297825ms step_avg:42.30ms
+[2025-09-06 03:27:54] [Rank 0] step:7061/10000 train_time:298564ms step_avg:42.28ms
+[2025-09-06 03:27:54] [Rank 0] step:7081/10000 train_time:299304ms step_avg:42.27ms
+[2025-09-06 03:27:55] [Rank 0] step:7101/10000 train_time:300043ms step_avg:42.25ms
+[2025-09-06 03:27:56] [Rank 0] step:7121/10000 train_time:300782ms step_avg:42.24ms
+[2025-09-06 03:27:56] [Rank 0] step:7141/10000 train_time:301522ms step_avg:42.22ms
+[2025-09-06 03:27:57] [Rank 0] step:7161/10000 train_time:302269ms step_avg:42.21ms
+[2025-09-06 03:27:58] [Rank 0] step:7181/10000 train_time:303008ms step_avg:42.20ms
+[2025-09-06 03:27:59] [Rank 0] step:7201/10000 train_time:303746ms step_avg:42.18ms
+[2025-09-06 03:27:59] [Rank 0] step:7221/10000 train_time:304484ms step_avg:42.17ms
+[2025-09-06 03:28:00] [Rank 0] step:7241/10000 train_time:305223ms step_avg:42.15ms
+[2025-09-06 03:28:01] [Rank 0] step:7261/10000 train_time:305963ms step_avg:42.14ms
+[2025-09-06 03:28:02] [Rank 0] step:7281/10000 train_time:306702ms step_avg:42.12ms
+[2025-09-06 03:28:02] [Rank 0] step:7301/10000 train_time:307442ms step_avg:42.11ms
+[2025-09-06 03:28:03] [Rank 0] step:7321/10000 train_time:308182ms step_avg:42.10ms
+[2025-09-06 03:28:04] [Rank 0] step:7341/10000 train_time:308922ms step_avg:42.08ms
+[2025-09-06 03:28:05] [Rank 0] step:7361/10000 train_time:309659ms step_avg:42.07ms
+[2025-09-06 03:28:05] [Rank 0] step:7381/10000 train_time:310398ms step_avg:42.05ms
+[2025-09-06 03:28:06] [Rank 0] step:7401/10000 train_time:311137ms step_avg:42.04ms
+[2025-09-06 03:28:07] [Rank 0] step:7421/10000 train_time:311877ms step_avg:42.03ms
+[2025-09-06 03:28:08] [Rank 0] step:7441/10000 train_time:312617ms step_avg:42.01ms
+[2025-09-06 03:28:08] [Rank 0] step:7461/10000 train_time:313356ms step_avg:42.00ms
+[2025-09-06 03:28:09] [Rank 0] step:7481/10000 train_time:314096ms step_avg:41.99ms
+[2025-09-06 03:28:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:28:10] [Rank 0] PRINT: step:7500/10000 train_loss:2.6194 val_loss:2.6000 train_time:314916ms step_avg:41.99ms
+[2025-09-06 03:28:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:28:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:29:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:29:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:29:32] [Rank 0] Total Loss: 4.8732
+[2025-09-06 03:29:32] [Rank 0] Total FTA (Unweighted): 0.2187
+[2025-09-06 03:29:33] [Rank 0] Total FTA (Weighted): 0.2188
+[2025-09-06 03:29:33] [Rank 0] Group 0 Loss: 3.2947
+[2025-09-06 03:29:33] [Rank 0] Group 1 Loss: 3.1098
+[2025-09-06 03:29:33] [Rank 0] Group 2 Loss: 3.3251
+[2025-09-06 03:29:33] [Rank 0] Group 3 Loss: 3.8157
+[2025-09-06 03:29:33] [Rank 0] Group 4 Loss: 4.3515
+[2025-09-06 03:29:33] [Rank 0] Group 5 Loss: 4.8118
+[2025-09-06 03:29:33] [Rank 0] Group 6 Loss: 5.1220
+[2025-09-06 03:29:33] [Rank 0] Group 7 Loss: 5.2219
+[2025-09-06 03:29:33] [Rank 0] Group 8 Loss: 5.4955
+[2025-09-06 03:29:33] [Rank 0] Group 9 Loss: 5.6516
+[2025-09-06 03:29:33] [Rank 0] Group 10 Loss: 5.6493
+[2025-09-06 03:29:33] [Rank 0] Group 11 Loss: 5.6804
+[2025-09-06 03:29:33] [Rank 0] Group 12 Loss: 5.5945
+[2025-09-06 03:29:33] [Rank 0] Group 13 Loss: 5.5967
+[2025-09-06 03:29:33] [Rank 0] Group 14 Loss: 5.6634
+[2025-09-06 03:29:33] [Rank 0] Group 15 Loss: 5.5880
+[2025-09-06 03:29:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:29:33] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 03:29:33] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:29:33] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:29:33] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 03:29:33] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:29:33] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 03:29:33] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:29:33] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 03:29:33] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:29:33] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 03:29:33] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:29:33] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 03:29:33] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 03:29:33] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-06 03:29:33] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 03:29:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:29:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:29:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:29:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:29:34] [Rank 0] step:7501/10000 train_time:314927ms step_avg:41.98ms
+[2025-09-06 03:29:35] [Rank 0] step:7521/10000 train_time:315603ms step_avg:41.96ms
+[2025-09-06 03:29:36] [Rank 0] step:7541/10000 train_time:316340ms step_avg:41.95ms
+[2025-09-06 03:29:36] [Rank 0] step:7561/10000 train_time:317079ms step_avg:41.94ms
+[2025-09-06 03:29:37] [Rank 0] step:7581/10000 train_time:317818ms step_avg:41.92ms
+[2025-09-06 03:29:38] [Rank 0] step:7601/10000 train_time:318557ms step_avg:41.91ms
+[2025-09-06 03:29:39] [Rank 0] step:7621/10000 train_time:319297ms step_avg:41.90ms
+[2025-09-06 03:29:40] [Rank 0] step:7641/10000 train_time:320685ms step_avg:41.97ms
+[2025-09-06 03:29:41] [Rank 0] step:7661/10000 train_time:321532ms step_avg:41.97ms
+[2025-09-06 03:29:42] [Rank 0] step:7681/10000 train_time:322271ms step_avg:41.96ms
+[2025-09-06 03:29:42] [Rank 0] step:7701/10000 train_time:323010ms step_avg:41.94ms
+[2025-09-06 03:29:43] [Rank 0] step:7721/10000 train_time:323749ms step_avg:41.93ms
+[2025-09-06 03:29:44] [Rank 0] step:7741/10000 train_time:324613ms step_avg:41.93ms
+[2025-09-06 03:29:45] [Rank 0] step:7761/10000 train_time:325353ms step_avg:41.92ms
+[2025-09-06 03:29:45] [Rank 0] step:7781/10000 train_time:326092ms step_avg:41.91ms
+[2025-09-06 03:29:46] [Rank 0] step:7801/10000 train_time:326831ms step_avg:41.90ms
+[2025-09-06 03:29:47] [Rank 0] step:7821/10000 train_time:327570ms step_avg:41.88ms
+[2025-09-06 03:29:48] [Rank 0] step:7841/10000 train_time:328309ms step_avg:41.87ms
+[2025-09-06 03:29:48] [Rank 0] step:7861/10000 train_time:329049ms step_avg:41.86ms
+[2025-09-06 03:29:49] [Rank 0] step:7881/10000 train_time:329790ms step_avg:41.85ms
+[2025-09-06 03:29:50] [Rank 0] step:7901/10000 train_time:330528ms step_avg:41.83ms
+[2025-09-06 03:29:51] [Rank 0] step:7921/10000 train_time:331268ms step_avg:41.82ms
+[2025-09-06 03:29:51] [Rank 0] step:7941/10000 train_time:332006ms step_avg:41.81ms
+[2025-09-06 03:29:52] [Rank 0] step:7961/10000 train_time:332745ms step_avg:41.80ms
+[2025-09-06 03:29:53] [Rank 0] step:7981/10000 train_time:333484ms step_avg:41.78ms
+[2025-09-06 03:29:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:29:54] [Rank 0] PRINT: step:8000/10000 train_loss:2.5985 val_loss:2.5796 train_time:334305ms step_avg:41.79ms
+[2025-09-06 03:29:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:29:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:31:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:31:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:31:16] [Rank 0] Total Loss: 4.8482
+[2025-09-06 03:31:16] [Rank 0] Total FTA (Unweighted): 0.2212
+[2025-09-06 03:31:16] [Rank 0] Total FTA (Weighted): 0.2213
+[2025-09-06 03:31:16] [Rank 0] Group 0 Loss: 3.2602
+[2025-09-06 03:31:16] [Rank 0] Group 1 Loss: 3.1004
+[2025-09-06 03:31:16] [Rank 0] Group 2 Loss: 3.3027
+[2025-09-06 03:31:16] [Rank 0] Group 3 Loss: 3.8061
+[2025-09-06 03:31:16] [Rank 0] Group 4 Loss: 4.3235
+[2025-09-06 03:31:16] [Rank 0] Group 5 Loss: 4.7735
+[2025-09-06 03:31:16] [Rank 0] Group 6 Loss: 5.0958
+[2025-09-06 03:31:16] [Rank 0] Group 7 Loss: 5.1922
+[2025-09-06 03:31:16] [Rank 0] Group 8 Loss: 5.4741
+[2025-09-06 03:31:16] [Rank 0] Group 9 Loss: 5.6215
+[2025-09-06 03:31:16] [Rank 0] Group 10 Loss: 5.6325
+[2025-09-06 03:31:16] [Rank 0] Group 11 Loss: 5.6638
+[2025-09-06 03:31:16] [Rank 0] Group 12 Loss: 5.5620
+[2025-09-06 03:31:16] [Rank 0] Group 13 Loss: 5.5721
+[2025-09-06 03:31:16] [Rank 0] Group 14 Loss: 5.6376
+[2025-09-06 03:31:16] [Rank 0] Group 15 Loss: 5.5526
+[2025-09-06 03:31:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:31:16] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 03:31:16] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:31:16] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:31:16] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-06 03:31:16] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 03:31:16] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 03:31:16] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:31:16] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 03:31:16] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:31:16] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 03:31:16] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:31:16] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 03:31:16] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 03:31:16] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:31:16] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-06 03:31:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:31:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:31:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:31:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:31:18] [Rank 0] step:8001/10000 train_time:334316ms step_avg:41.78ms
+[2025-09-06 03:31:19] [Rank 0] step:8021/10000 train_time:335609ms step_avg:41.84ms
+[2025-09-06 03:31:20] [Rank 0] step:8041/10000 train_time:336358ms step_avg:41.83ms
+[2025-09-06 03:31:21] [Rank 0] step:8061/10000 train_time:337098ms step_avg:41.82ms
+[2025-09-06 03:31:21] [Rank 0] step:8081/10000 train_time:337839ms step_avg:41.81ms
+[2025-09-06 03:31:22] [Rank 0] step:8101/10000 train_time:338578ms step_avg:41.79ms
+[2025-09-06 03:31:23] [Rank 0] step:8121/10000 train_time:339323ms step_avg:41.78ms
+[2025-09-06 03:31:24] [Rank 0] step:8141/10000 train_time:340060ms step_avg:41.77ms
+[2025-09-06 03:31:24] [Rank 0] step:8161/10000 train_time:340798ms step_avg:41.76ms
+[2025-09-06 03:31:25] [Rank 0] step:8181/10000 train_time:341539ms step_avg:41.75ms
+[2025-09-06 03:31:26] [Rank 0] step:8201/10000 train_time:342276ms step_avg:41.74ms
+[2025-09-06 03:31:27] [Rank 0] step:8221/10000 train_time:343015ms step_avg:41.72ms
+[2025-09-06 03:31:27] [Rank 0] step:8241/10000 train_time:343765ms step_avg:41.71ms
+[2025-09-06 03:31:28] [Rank 0] step:8261/10000 train_time:344504ms step_avg:41.70ms
+[2025-09-06 03:31:29] [Rank 0] step:8281/10000 train_time:345244ms step_avg:41.69ms
+[2025-09-06 03:31:30] [Rank 0] step:8301/10000 train_time:345983ms step_avg:41.68ms
+[2025-09-06 03:31:30] [Rank 0] step:8321/10000 train_time:346723ms step_avg:41.67ms
+[2025-09-06 03:31:31] [Rank 0] step:8341/10000 train_time:347462ms step_avg:41.66ms
+[2025-09-06 03:31:32] [Rank 0] step:8361/10000 train_time:348201ms step_avg:41.65ms
+[2025-09-06 03:31:33] [Rank 0] step:8381/10000 train_time:348940ms step_avg:41.63ms
+[2025-09-06 03:31:33] [Rank 0] step:8401/10000 train_time:349679ms step_avg:41.62ms
+[2025-09-06 03:31:34] [Rank 0] step:8421/10000 train_time:350419ms step_avg:41.61ms
+[2025-09-06 03:31:35] [Rank 0] step:8441/10000 train_time:351158ms step_avg:41.60ms
+[2025-09-06 03:31:35] [Rank 0] step:8461/10000 train_time:351897ms step_avg:41.59ms
+[2025-09-06 03:31:36] [Rank 0] step:8481/10000 train_time:352637ms step_avg:41.58ms
+[2025-09-06 03:31:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:31:37] [Rank 0] PRINT: step:8500/10000 train_loss:2.5809 val_loss:2.5635 train_time:353456ms step_avg:41.58ms
+[2025-09-06 03:31:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:31:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:33:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:33:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:33:00] [Rank 0] Total Loss: 4.8397
+[2025-09-06 03:33:00] [Rank 0] Total FTA (Unweighted): 0.2213
+[2025-09-06 03:33:00] [Rank 0] Total FTA (Weighted): 0.2213
+[2025-09-06 03:33:00] [Rank 0] Group 0 Loss: 3.2756
+[2025-09-06 03:33:00] [Rank 0] Group 1 Loss: 3.1074
+[2025-09-06 03:33:00] [Rank 0] Group 2 Loss: 3.2826
+[2025-09-06 03:33:00] [Rank 0] Group 3 Loss: 3.8130
+[2025-09-06 03:33:00] [Rank 0] Group 4 Loss: 4.2983
+[2025-09-06 03:33:00] [Rank 0] Group 5 Loss: 4.7559
+[2025-09-06 03:33:00] [Rank 0] Group 6 Loss: 5.0939
+[2025-09-06 03:33:00] [Rank 0] Group 7 Loss: 5.1805
+[2025-09-06 03:33:00] [Rank 0] Group 8 Loss: 5.4668
+[2025-09-06 03:33:00] [Rank 0] Group 9 Loss: 5.6073
+[2025-09-06 03:33:00] [Rank 0] Group 10 Loss: 5.6197
+[2025-09-06 03:33:00] [Rank 0] Group 11 Loss: 5.6438
+[2025-09-06 03:33:00] [Rank 0] Group 12 Loss: 5.5512
+[2025-09-06 03:33:00] [Rank 0] Group 13 Loss: 5.5644
+[2025-09-06 03:33:00] [Rank 0] Group 14 Loss: 5.6303
+[2025-09-06 03:33:00] [Rank 0] Group 15 Loss: 5.5446
+[2025-09-06 03:33:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:33:00] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 03:33:00] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:33:00] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:33:00] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-06 03:33:00] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 03:33:00] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 03:33:00] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:33:00] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 03:33:00] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:33:00] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-06 03:33:00] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:33:00] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 03:33:00] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-06 03:33:00] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:33:00] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 03:33:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:33:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:33:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:33:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:33:02] [Rank 0] step:8501/10000 train_time:353467ms step_avg:41.58ms
+[2025-09-06 03:33:02] [Rank 0] step:8521/10000 train_time:354151ms step_avg:41.56ms
+[2025-09-06 03:33:03] [Rank 0] step:8541/10000 train_time:354890ms step_avg:41.55ms
+[2025-09-06 03:33:04] [Rank 0] step:8561/10000 train_time:355629ms step_avg:41.54ms
+[2025-09-06 03:33:05] [Rank 0] step:8581/10000 train_time:356369ms step_avg:41.53ms
+[2025-09-06 03:33:05] [Rank 0] step:8601/10000 train_time:357108ms step_avg:41.52ms
+[2025-09-06 03:33:06] [Rank 0] step:8621/10000 train_time:357848ms step_avg:41.51ms
+[2025-09-06 03:33:07] [Rank 0] step:8641/10000 train_time:358587ms step_avg:41.50ms
+[2025-09-06 03:33:08] [Rank 0] step:8661/10000 train_time:359326ms step_avg:41.49ms
+[2025-09-06 03:33:08] [Rank 0] step:8681/10000 train_time:360065ms step_avg:41.48ms
+[2025-09-06 03:33:09] [Rank 0] step:8701/10000 train_time:360804ms step_avg:41.47ms
+[2025-09-06 03:33:10] [Rank 0] step:8721/10000 train_time:361544ms step_avg:41.46ms
+[2025-09-06 03:33:11] [Rank 0] step:8741/10000 train_time:362283ms step_avg:41.45ms
+[2025-09-06 03:33:11] [Rank 0] step:8761/10000 train_time:363022ms step_avg:41.44ms
+[2025-09-06 03:33:12] [Rank 0] step:8781/10000 train_time:363762ms step_avg:41.43ms
+[2025-09-06 03:33:13] [Rank 0] step:8801/10000 train_time:364500ms step_avg:41.42ms
+[2025-09-06 03:33:14] [Rank 0] step:8821/10000 train_time:365239ms step_avg:41.41ms
+[2025-09-06 03:33:15] [Rank 0] step:8841/10000 train_time:366591ms step_avg:41.46ms
+[2025-09-06 03:33:16] [Rank 0] step:8861/10000 train_time:367331ms step_avg:41.45ms
+[2025-09-06 03:33:16] [Rank 0] step:8881/10000 train_time:368070ms step_avg:41.44ms
+[2025-09-06 03:33:17] [Rank 0] step:8901/10000 train_time:368810ms step_avg:41.43ms
+[2025-09-06 03:33:18] [Rank 0] step:8921/10000 train_time:369549ms step_avg:41.42ms
+[2025-09-06 03:33:19] [Rank 0] step:8941/10000 train_time:370289ms step_avg:41.41ms
+[2025-09-06 03:33:19] [Rank 0] step:8961/10000 train_time:371029ms step_avg:41.40ms
+[2025-09-06 03:33:20] [Rank 0] step:8981/10000 train_time:371769ms step_avg:41.40ms
+[2025-09-06 03:33:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:33:21] [Rank 0] PRINT: step:9000/10000 train_loss:2.5645 val_loss:2.5496 train_time:372588ms step_avg:41.40ms
+[2025-09-06 03:33:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:33:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:34:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:34:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:34:43] [Rank 0] Total Loss: 4.8267
+[2025-09-06 03:34:43] [Rank 0] Total FTA (Unweighted): 0.2325
+[2025-09-06 03:34:43] [Rank 0] Total FTA (Weighted): 0.2325
+[2025-09-06 03:34:43] [Rank 0] Group 0 Loss: 3.2857
+[2025-09-06 03:34:43] [Rank 0] Group 1 Loss: 3.0855
+[2025-09-06 03:34:43] [Rank 0] Group 2 Loss: 3.2776
+[2025-09-06 03:34:43] [Rank 0] Group 3 Loss: 3.7887
+[2025-09-06 03:34:43] [Rank 0] Group 4 Loss: 4.2916
+[2025-09-06 03:34:43] [Rank 0] Group 5 Loss: 4.7480
+[2025-09-06 03:34:43] [Rank 0] Group 6 Loss: 5.0782
+[2025-09-06 03:34:43] [Rank 0] Group 7 Loss: 5.1580
+[2025-09-06 03:34:43] [Rank 0] Group 8 Loss: 5.4529
+[2025-09-06 03:34:43] [Rank 0] Group 9 Loss: 5.5877
+[2025-09-06 03:34:43] [Rank 0] Group 10 Loss: 5.5967
+[2025-09-06 03:34:43] [Rank 0] Group 11 Loss: 5.6346
+[2025-09-06 03:34:43] [Rank 0] Group 12 Loss: 5.5350
+[2025-09-06 03:34:43] [Rank 0] Group 13 Loss: 5.5567
+[2025-09-06 03:34:43] [Rank 0] Group 14 Loss: 5.6123
+[2025-09-06 03:34:43] [Rank 0] Group 15 Loss: 5.5375
+[2025-09-06 03:34:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:34:43] [Rank 0] Group 1 FTA: 0.6900
+[2025-09-06 03:34:43] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:34:43] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:34:43] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-06 03:34:43] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 03:34:43] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 03:34:43] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:34:43] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 03:34:43] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:34:43] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 03:34:43] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:34:43] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 03:34:43] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 03:34:43] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:34:43] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-06 03:34:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:34:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:34:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:34:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:34:44] [Rank 0] step:9001/10000 train_time:372599ms step_avg:41.40ms
+[2025-09-06 03:34:45] [Rank 0] step:9021/10000 train_time:373272ms step_avg:41.38ms
+[2025-09-06 03:34:46] [Rank 0] step:9041/10000 train_time:374011ms step_avg:41.37ms
+[2025-09-06 03:34:47] [Rank 0] step:9061/10000 train_time:374751ms step_avg:41.36ms
+[2025-09-06 03:34:47] [Rank 0] step:9081/10000 train_time:375490ms step_avg:41.35ms
+[2025-09-06 03:34:48] [Rank 0] step:9101/10000 train_time:376230ms step_avg:41.34ms
+[2025-09-06 03:34:49] [Rank 0] step:9121/10000 train_time:376968ms step_avg:41.33ms
+[2025-09-06 03:34:50] [Rank 0] step:9141/10000 train_time:377708ms step_avg:41.32ms
+[2025-09-06 03:34:50] [Rank 0] step:9161/10000 train_time:378447ms step_avg:41.31ms
+[2025-09-06 03:34:51] [Rank 0] step:9181/10000 train_time:379185ms step_avg:41.30ms
+[2025-09-06 03:34:52] [Rank 0] step:9201/10000 train_time:379924ms step_avg:41.29ms
+[2025-09-06 03:34:52] [Rank 0] step:9221/10000 train_time:380664ms step_avg:41.28ms
+[2025-09-06 03:34:53] [Rank 0] step:9241/10000 train_time:381403ms step_avg:41.27ms
+[2025-09-06 03:34:54] [Rank 0] step:9261/10000 train_time:382143ms step_avg:41.26ms
+[2025-09-06 03:34:55] [Rank 0] step:9281/10000 train_time:382882ms step_avg:41.25ms
+[2025-09-06 03:34:55] [Rank 0] step:9301/10000 train_time:383621ms step_avg:41.25ms
+[2025-09-06 03:34:56] [Rank 0] step:9321/10000 train_time:384361ms step_avg:41.24ms
+[2025-09-06 03:34:57] [Rank 0] step:9341/10000 train_time:385205ms step_avg:41.24ms
+[2025-09-06 03:34:58] [Rank 0] step:9361/10000 train_time:385944ms step_avg:41.23ms
+[2025-09-06 03:34:58] [Rank 0] step:9381/10000 train_time:386683ms step_avg:41.22ms
+[2025-09-06 03:34:59] [Rank 0] step:9401/10000 train_time:387567ms step_avg:41.23ms
+[2025-09-06 03:35:00] [Rank 0] step:9421/10000 train_time:388307ms step_avg:41.22ms
+[2025-09-06 03:35:01] [Rank 0] step:9441/10000 train_time:389045ms step_avg:41.21ms
+[2025-09-06 03:35:02] [Rank 0] step:9461/10000 train_time:389806ms step_avg:41.20ms
+[2025-09-06 03:35:02] [Rank 0] step:9481/10000 train_time:390545ms step_avg:41.19ms
+[2025-09-06 03:35:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:35:04] [Rank 0] PRINT: step:9500/10000 train_loss:2.5509 val_loss:2.5377 train_time:391366ms step_avg:41.20ms
+[2025-09-06 03:35:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:35:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:36:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:36:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:36:25] [Rank 0] Total Loss: 4.8078
+[2025-09-06 03:36:25] [Rank 0] Total FTA (Unweighted): 0.2350
+[2025-09-06 03:36:25] [Rank 0] Total FTA (Weighted): 0.2350
+[2025-09-06 03:36:25] [Rank 0] Group 0 Loss: 3.2740
+[2025-09-06 03:36:25] [Rank 0] Group 1 Loss: 3.0987
+[2025-09-06 03:36:25] [Rank 0] Group 2 Loss: 3.2772
+[2025-09-06 03:36:25] [Rank 0] Group 3 Loss: 3.7697
+[2025-09-06 03:36:25] [Rank 0] Group 4 Loss: 4.2596
+[2025-09-06 03:36:25] [Rank 0] Group 5 Loss: 4.7140
+[2025-09-06 03:36:25] [Rank 0] Group 6 Loss: 5.0341
+[2025-09-06 03:36:25] [Rank 0] Group 7 Loss: 5.1459
+[2025-09-06 03:36:25] [Rank 0] Group 8 Loss: 5.4334
+[2025-09-06 03:36:25] [Rank 0] Group 9 Loss: 5.5711
+[2025-09-06 03:36:25] [Rank 0] Group 10 Loss: 5.5755
+[2025-09-06 03:36:25] [Rank 0] Group 11 Loss: 5.6127
+[2025-09-06 03:36:25] [Rank 0] Group 12 Loss: 5.5162
+[2025-09-06 03:36:25] [Rank 0] Group 13 Loss: 5.5311
+[2025-09-06 03:36:25] [Rank 0] Group 14 Loss: 5.5967
+[2025-09-06 03:36:25] [Rank 0] Group 15 Loss: 5.5153
+[2025-09-06 03:36:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:36:25] [Rank 0] Group 1 FTA: 0.6900
+[2025-09-06 03:36:25] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:36:25] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 03:36:25] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-06 03:36:25] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 03:36:25] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 03:36:25] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:36:25] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 03:36:25] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 03:36:25] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 03:36:25] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:36:25] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 03:36:25] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 03:36:25] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-06 03:36:25] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-06 03:36:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:36:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:36:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:36:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:36:27] [Rank 0] step:9501/10000 train_time:391377ms step_avg:41.19ms
+[2025-09-06 03:36:28] [Rank 0] step:9521/10000 train_time:392057ms step_avg:41.18ms
+[2025-09-06 03:36:28] [Rank 0] step:9541/10000 train_time:392796ms step_avg:41.17ms
+[2025-09-06 03:36:29] [Rank 0] step:9561/10000 train_time:393535ms step_avg:41.16ms
+[2025-09-06 03:36:30] [Rank 0] step:9581/10000 train_time:394275ms step_avg:41.15ms
+[2025-09-06 03:36:31] [Rank 0] step:9601/10000 train_time:395015ms step_avg:41.14ms
+[2025-09-06 03:36:31] [Rank 0] step:9621/10000 train_time:395754ms step_avg:41.13ms
+[2025-09-06 03:36:32] [Rank 0] step:9641/10000 train_time:396492ms step_avg:41.13ms
+[2025-09-06 03:36:33] [Rank 0] step:9661/10000 train_time:397507ms step_avg:41.15ms
+[2025-09-06 03:36:34] [Rank 0] step:9681/10000 train_time:398247ms step_avg:41.14ms
+[2025-09-06 03:36:34] [Rank 0] step:9701/10000 train_time:398986ms step_avg:41.13ms
+[2025-09-06 03:36:35] [Rank 0] step:9721/10000 train_time:399725ms step_avg:41.12ms
+[2025-09-06 03:36:36] [Rank 0] step:9741/10000 train_time:400465ms step_avg:41.11ms
+[2025-09-06 03:36:37] [Rank 0] step:9761/10000 train_time:401204ms step_avg:41.10ms
+[2025-09-06 03:36:37] [Rank 0] step:9781/10000 train_time:401943ms step_avg:41.09ms
+[2025-09-06 03:36:38] [Rank 0] step:9801/10000 train_time:402683ms step_avg:41.09ms
+[2025-09-06 03:36:39] [Rank 0] step:9821/10000 train_time:403422ms step_avg:41.08ms
+[2025-09-06 03:36:40] [Rank 0] step:9841/10000 train_time:404161ms step_avg:41.07ms
+[2025-09-06 03:36:40] [Rank 0] step:9861/10000 train_time:404900ms step_avg:41.06ms
+[2025-09-06 03:36:41] [Rank 0] step:9881/10000 train_time:405640ms step_avg:41.05ms
+[2025-09-06 03:36:42] [Rank 0] step:9901/10000 train_time:406379ms step_avg:41.04ms
+[2025-09-06 03:36:43] [Rank 0] step:9921/10000 train_time:407118ms step_avg:41.04ms
+[2025-09-06 03:36:43] [Rank 0] step:9941/10000 train_time:407857ms step_avg:41.03ms
+[2025-09-06 03:36:44] [Rank 0] step:9961/10000 train_time:408596ms step_avg:41.02ms
+[2025-09-06 03:36:45] [Rank 0] step:9981/10000 train_time:409335ms step_avg:41.01ms
+[2025-09-06 03:36:46] [Rank 0] step:10000/10000 train_time:410038ms step_avg:41.00ms
+[2025-09-06 03:36:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:36:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:36:46] [Rank 0] PRINT: step:10000/10000 train_loss:2.5403 val_loss:2.5281 train_time:410163ms step_avg:41.02ms +[2025-09-06 03:36:46] [Rank 0] PRINT: step:10000/10000 train_loss:2.5403 val_loss:2.5281 train_time:410163ms step_avg:41.02ms +[2025-09-06 03:36:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:36:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:36:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:36:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:38:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:38:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:38:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:38:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:38:08] [Rank 0] Total Loss: 4.8118 +[2025-09-06 03:38:08] [Rank 0] Total Loss: 4.8118 +[2025-09-06 03:38:08] [Rank 0] Total FTA (Unweighted): 0.2394 +[2025-09-06 03:38:08] [Rank 0] Total FTA (Unweighted): 0.2394 +[2025-09-06 03:38:08] [Rank 0] Total FTA (Weighted): 0.2394 +[2025-09-06 03:38:08] [Rank 0] Total FTA (Weighted): 0.2394 +[2025-09-06 03:38:08] [Rank 0] Group 0 Loss: 3.2716 +[2025-09-06 03:38:08] [Rank 0] Group 0 Loss: 3.2716 +[2025-09-06 03:38:08] [Rank 0] Group 1 Loss: 3.0954 +[2025-09-06 03:38:08] [Rank 0] Group 1 Loss: 3.0954 +[2025-09-06 03:38:08] [Rank 0] Group 2 Loss: 3.2775 +[2025-09-06 03:38:08] [Rank 0] Group 2 Loss: 3.2775 +[2025-09-06 03:38:08] [Rank 0] Group 3 Loss: 3.7850 +[2025-09-06 03:38:08] [Rank 0] Group 3 Loss: 3.7850 +[2025-09-06 03:38:08] [Rank 0] Group 4 Loss: 4.2638 +[2025-09-06 03:38:08] [Rank 0] Group 4 Loss: 4.2638 +[2025-09-06 03:38:08] [Rank 0] Group 5 Loss: 4.7162 +[2025-09-06 03:38:08] [Rank 0] Group 5 Loss: 4.7162 +[2025-09-06 03:38:08] [Rank 0] Group 6 Loss: 5.0480 +[2025-09-06 03:38:08] [Rank 0] Group 6 Loss: 5.0480 +[2025-09-06 03:38:08] [Rank 0] Group 7 Loss: 5.1468 +[2025-09-06 03:38:08] [Rank 0] Group 7 Loss: 5.1468 +[2025-09-06 03:38:08] [Rank 0] Group 8 Loss: 5.4312 +[2025-09-06 03:38:08] [Rank 0] Group 8 Loss: 5.4312 +[2025-09-06 03:38:08] [Rank 0] Group 9 Loss: 5.5756 +[2025-09-06 03:38:08] [Rank 0] Group 9 Loss: 5.5756 +[2025-09-06 03:38:08] [Rank 0] Group 10 Loss: 5.5845 +[2025-09-06 03:38:08] [Rank 0] Group 10 Loss: 5.5845 +[2025-09-06 03:38:08] [Rank 0] Group 11 Loss: 5.6196 +[2025-09-06 03:38:08] [Rank 0] Group 11 Loss: 5.6196 +[2025-09-06 03:38:08] [Rank 0] Group 12 Loss: 5.5177 +[2025-09-06 03:38:08] [Rank 0] Group 12 Loss: 5.5177 +[2025-09-06 03:38:08] [Rank 0] Group 13 Loss: 5.5410 +[2025-09-06 03:38:08] [Rank 0] Group 13 Loss: 5.5410 +[2025-09-06 03:38:08] [Rank 0] Group 14 Loss: 5.5958 +[2025-09-06 03:38:08] [Rank 0] Group 14 Loss: 5.5958 +[2025-09-06 03:38:08] [Rank 0] Group 15 Loss: 5.5186 +[2025-09-06 03:38:08] [Rank 0] Group 15 Loss: 5.5186 +[2025-09-06 03:38:08] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 03:38:08] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 03:38:08] [Rank 0] Group 1 FTA: 0.6900 +[2025-09-06 03:38:08] [Rank 0] Group 1 FTA: 0.6900 +[2025-09-06 03:38:08] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:38:08] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 03:38:08] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 03:38:08] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 03:38:08] [Rank 0] Group 4 FTA: 0.1900 
+[2025-09-06 03:38:08] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 03:38:08] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 03:38:08] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 03:38:08] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 03:38:08] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:38:08] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-06 03:38:08] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:38:08] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 03:38:08] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 03:38:08] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 03:38:08] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-06 03:38:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_loss_curves.png
+[2025-09-06 03:38:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/per_class_acc_curves.png
+[2025-09-06 03:38:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_loss_curve.png
+[2025-09-06 03:38:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_43/total_acc_curve.png
+[2025-09-06 03:38:09] [Rank 0] step:10001/10000 train_time:410174ms step_avg:41.01ms
+[2025-09-06 03:38:09] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 03:38:09 2025 ---
+[2025-09-06 03:38:09] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..da993897f56c70f4c125c5637543e64ba8f45b7a
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/config.json
@@ -0,0 +1,29 @@
+{ + "cli_args": { + "unet": false,
+ "seed": 44, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.05, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "8ddf1100-df78-44c6-97f4-17c64b5cb4f1", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 
894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 
425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 
617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 
898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..3a5fea5cc69ba471f470a78c4e2e238612b8b89a --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9d8800912f8932dc63bbc8b5da4685ac237d6106eb2a64d0c372cb9506398cc +size 251926 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..bd7639e81940b0eeed534a6678f6900f2a60eeb9 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac8f6d05ff58c0e24384a14f5184df4e73e73f959d4812730b7ee23895df278a +size 412706 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..f2ea9ef2b256a6488a1ce00595841d965b668358 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa41945fe503827883f280c0b03602244c32cb6ac68fa6b1d24bee16eb615143 +size 86831 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..ea3ed3442ccd44e1756e4288434f3afb5e979640 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4be44d3213ee2f7ff8a436fe5976ca5a02db3eaada27a70ee8ea114289a26098 +size 120823 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/training_log_8ddf1100-df78-44c6-97f4-17c64b5cb4f1.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/training_log_8ddf1100-df78-44c6-97f4-17c64b5cb4f1.txt new file mode 100644 index 0000000000000000000000000000000000000000..95e90ffff8e0139e24764a5a64d758d2ca1e8eca --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/training_log_8ddf1100-df78-44c6-97f4-17c64b5cb4f1.txt @@ -0,0 +1,5614 @@ +[2025-09-06 03:38:31] [Rank 0] PRINT: --- Script Start: Sat Sep 6 03:38:31 2025 --- +[2025-09-06 03:38:31] [Rank 0] PRINT: --- Script Start: Sat Sep 6 03:38:31 2025 --- +[2025-09-06 03:38:31] 
[Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.05, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-06 03:38:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-06 03:38:31] [Rank 0] PRINT: Using fixed seed: 44
+[2025-09-06 03:38:31] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44
+[2025-09-06 03:38:31] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
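+# The two helpers below read nanoGPT-style token shards: a 256-int32 header
+# whose first three words are (magic=20240520, version=1, num_tokens), followed
+# by num_tokens uint16 token ids. As an illustrative sketch only (not part of
+# the original pipeline; "token_ids" is a stand-in for any list of ids that
+# fits in uint16), a compatible shard could be produced with:
+#
+#     header = np.zeros(256, dtype=np.int32)
+#     header[0], header[1], header[2] = 20240520, 1, len(token_ids)
+#     with open("train_000.bin", "wb") as f:
+#         f.write(header.tobytes())
+#         f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())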
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so multi-epoch training simply wraps around
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn, MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn, MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (no Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: SGD+Momentum on all parameters (no Muon/Adam; uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QKV Attn); "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QKV Attn, W_1 MLP); "
+                         "14: Muon(O Attn)/Adam(QKV Attn, MLP); "
+                         "15: Muon(V Attn)/Adam(QK+O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole","qkvo","gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
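+# A hedged reconstruction of how a run like the one logged here could be
+# launched (the script filename "train_qa.py" and the bare-python launcher are
+# placeholders; only the flag values come from the parsed args echoed above):
+#
+#     python train_qa.py --optimizer_mode 9 --model_parameterization gated \
+#         --sgd_lr 0.05 --seed 44 --m_val 15 --per_group_k 100 \
+#         --base_dir logs_qa_sgd_gated/lr_search_long \
+#         --qa_jsonl_path /home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl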
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 491520
+    train_seq_len = 3*1024
+    val_seq_len = 4*4*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
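+# Sanity check on the validation token budget (our own note, not original code):
+# the runtime warning "val_tokens (491520) not perfectly divisible by
+# val_batch_size (65536)" follows from 491520 / 65536 = 7.5, so validation runs
+# 7 full batches (458752 tokens) and skips the trailing 32768 tokens. How
+# val_batch_size is assembled from val_seq_len and the world size is an
+# assumption on our part; the numbers themselves come straight from the log.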
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
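+# Worked example (our own illustration): for m = 3 the function above yields
+#     selection_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#     class_groups     == [0, 1, 2, 2, 3, 3, 3, 3]
+# i.e. group 0 is a single class with 2**m samples, and every group g >= 1
+# holds 2**(g-1) classes of 2**(m-g) samples each, a constant 2**(m-1) samples
+# per group, so per-class sample counts decay as a power law in the group id.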
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+            group_total_fta[group_id] += 1
+
+    # 4.
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) #add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured.
+ print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >= 2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr # LR for Muon (needed when Muon is created below) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
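In the gated branch above, W_1 is taken to be both input projections (c_fc and the gate/up projection c_up), while W_2 is the output projection c_proj. An illustrative gated MLP showing which weights land in which group; the module and its SiLU gating are assumptions for illustration, not the script's nano_GPT_gated class:

import torch
from torch import nn
import torch.nn.functional as F

class GatedMLP(nn.Module):
    # Illustrative only: the real gated MLP may differ in activation and
    # shapes; the point is which weights count as W_1 versus W_2.
    def __init__(self, dim, hidden):
        super().__init__()
        self.c_fc = nn.Linear(dim, hidden, bias=False)    # W_1 (input proj)
        self.c_up = nn.Linear(dim, hidden, bias=False)    # W_1 (gate/up proj)
        self.c_proj = nn.Linear(hidden, dim, bias=False)  # W_2 (output proj)

    def forward(self, x):
        return self.c_proj(F.silu(self.c_fc(x)) * self.c_up(x))

mlp = GatedMLP(768, 3072)
mlp_w1_group = [mlp.c_fc.weight, mlp.c_up.weight]
mlp_w2_group = [mlp.c_proj.weight]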
+ elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
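The same if/elif ladder is maintained by hand twice, once per parameterization branch, which is how the two copies can drift. A table-driven sketch of the same dispatch; the dict, keys, and helper are illustrative, not the script's code:

# Refactor sketch (not the script's code): one table shared by both branches
# instead of two hand-maintained if/elif ladders.
MODE_TABLE = {
    0: (["attn", "mlp"], []),          # Muon on everything hidden
    1: (["qk"], ["vo", "mlp"]),
    5: ([], ["attn", "mlp"]),          # all-Adam
    16: (["qk", "v"], ["o", "mlp"]),
}

def partition(mode, groups):
    # groups: e.g. {"qk": attn_qk_group, "v": attn_v_params, "o": attn_o_params,
    #               "vo": attn_vo_group, "attn": all_attn_matrices, "mlp": all_mlp_matrices}
    if mode not in MODE_TABLE:
        raise ValueError(f"Unsupported EXPERIMENT_MODE: {mode}")
    muon_keys, adam_keys = MODE_TABLE[mode]
    return ([p for k in muon_keys for p in groups[k]],
            [p for k in adam_keys for p in groups[k]])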
+ elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
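With this run's config (num_iterations=10000, cooldown_frac=0.8), get_lr above holds the multiplier at 1.0 for the first 2000 steps and then decays linearly to 0.1 at the final step. A self-contained check of a few points, mirroring the function above:

def lr_mult(step, num_iterations=10000, cooldown_frac=0.8):
    # Same stable-then-decay shape as get_lr above, with x clamped to [0, 1].
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1

for s in (0, 1999, 2000, 6000, 10000):
    print(s, round(lr_mult(s), 4))
# -> 1.0, 1.0, 1.0, 0.55, 0.1: flat for the first 20% of steps, then linear to 0.1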
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
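build_fixed_eval_indices above pins the evaluation set: a seeded rng draws at most per_group_k (100 here) parseable QA lines per group, so later FTA/loss curves move only when the model does, not because the sample changed. A toy sketch of the same equal-per-group sampling, with a made-up group assignment:

import random
from collections import defaultdict

def fixed_indices(group_of, n_items, k=100, seed=2025):
    # Seeded: the same (data, seed) pair always yields the same eval set.
    rng = random.Random(seed)
    buckets = defaultdict(list)
    for i in range(n_items):
        buckets[group_of(i)].append(i)
    # Take everything when a group is small, exactly k otherwise.
    return {str(g): (a[:] if len(a) <= k else rng.sample(a, k))
            for g, a in buckets.items()}

idx = fixed_indices(lambda i: i % 10, 2500)  # 10 toy groups, 250 items each
assert all(len(v) == 100 for v in idx.values())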
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
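For this run (val_tokens=491520, val_seq_len=16384), the divisibility warning above fires exactly when world_size does not divide the 30 single-GPU validation steps evenly; a quick check with illustrative world sizes:

val_tokens, val_seq_len = 491520, 16384  # this run's config.json
for world_size in (1, 2, 4, 8):
    val_batch_size = world_size * val_seq_len
    steps, missed = divmod(val_tokens, val_batch_size)
    print(world_size, steps, missed)
# world_size 1 or 2 divides evenly (30 / 15 steps, 0 tokens missed);
# world_size 4 gives 7 steps with 32768 missed, 8 gives 3 with 98304 missed.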
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-06 03:38:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
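The Muon momentum schedule in the training loop above warms up linearly from 0.85 to 0.95 over the first 300 steps and then holds; a one-screen check mirroring that expression:

def muon_momentum(step):
    # 0.85 -> 0.95 linearly over the first 300 steps, then constant.
    frac = min(step / 300, 1)
    return (1 - frac) * 0.85 + frac * 0.95

assert muon_momentum(0) == 0.85
assert abs(muon_momentum(150) - 0.90) < 1e-12
assert muon_momentum(300) == muon_momentum(10_000) == 0.95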
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
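_load_data_shard above fixes the on-disk contract: 256 int32 header slots (magic 20240520, version 1, token count), followed by the tokens as uint16. A minimal writer that produces a shard the loader will accept, handy for smoke tests; the file name and token values are made up:

import numpy as np

def write_shard(path, tokens):
    # Header layout expected by _load_data_shard: 256 int32 slots, of which
    # [0]=magic 20240520, [1]=version 1, [2]=token count; then uint16 tokens.
    header = np.zeros(256, dtype=np.int32)
    header[0], header[1], header[2] = 20240520, 1, len(tokens)
    with open(path, "wb") as f:
        f.write(header.tobytes())
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())

write_shard("smoke_train_000000.bin", np.arange(4096) % 50257)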
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write each message to the log file once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n")
+ + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
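The FTA branch above reduces each QA item to a single comparison: the argmax logit at the last prompt position versus the first GPT-2 token of ' ' + answer (the leading space matters for BPE). A minimal sketch of the parsing half, on an invented QA line:

import re
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained('gpt2')
text = "Where was Ada Lovelace born? Answer: London"   # invented QA line
m = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', text, re.IGNORECASE)
prompt, answer = (s.strip() for s in m.groups())
expected_token = tok.encode(' ' + answer, add_special_tokens=False)[0]
# A prediction counts as correct iff the argmax over the logits at position
# len(prompt tokens) - 1 equals expected_token.
print(prompt, '->', expected_token, repr(tok.decode([expected_token])))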
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
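The two totals above can diverge sharply under the power-law split: the sample-weighted FTA is dominated by the populous head groups, while the unweighted mean gives each group one vote. A two-group toy case with invented counts:

correct = {"0": 90, "5": 1}     # group -> correct answers (invented)
total = {"0": 100, "5": 10}     # group -> samples evaluated
per_group = {g: correct[g] / total[g] for g in total}            # 0.9 and 0.1
weighted = sum(correct.values()) / sum(total.values())           # 91/110 ~= 0.827
unweighted = sum(per_group.values()) / len(per_group)            # 0.5
print(per_group, round(weighted, 3), unweighted)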
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr # LR for Muon (referenced by the Muon(...) setup below)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile 'model' here; 'model_compiled' does not exist until this assignment
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter for building the fixed eval set: ensure the text parses as "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+        #if last_step:
+        #    if master_process and args.save_checkpoint:
+        #        if run_dir_path_str:
+        #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+        #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+        #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+        #            log_checkpoint = dict(
+        #                step=step,
+        #                code=code,
+        #                model=model_compiled.state_dict(),
+        #                optimizers=[opt.state_dict() for opt in optimizers]
+        #            )
+        #            torch.save(log_checkpoint, str(checkpoint_path))
+        #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+        #        else:
+        #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+        #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-06 03:38:31] [Rank 0] PRINT: Constructing model...
+[2025-09-06 03:38:32] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-06 03:38:32] [Rank 0] PRINT: Model constructed and broadcasted.
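Before the runtime log continues, a worked example of the stable-then-decay shape implemented by get_lr in the script above. num_iterations=10000 matches this run; cooldown_frac is not shown in this excerpt, so 0.8 is only an assumed value for illustration.

    # Minimal sketch of get_lr's shape (cooldown_frac = 0.8 is an assumption)
    def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
        x = min(max(step / num_iterations, 0.0), 1.0)  # clamped progress, as in get_lr
        if x < 1 - cooldown_frac:
            return 1.0                                 # stable phase
        w = (1 - x) / cooldown_frac
        return w * 1.0 + (1 - w) * 0.1                 # linear decay down to 0.1

    # lr_multiplier(0) == lr_multiplier(2000) == 1.0
    # lr_multiplier(6000) == 0.55; lr_multiplier(10000) == 0.1

Under that assumed cooldown_frac, this run's SGD base lr of 0.05 would stay flat for the first 20% of training and then decay linearly to 0.005.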
+[2025-09-06 03:38:32] [Rank 0] PRINT: Testing model forward function:
+[2025-09-06 03:38:36] [Rank 0] PRINT: Model test - Result type:
+[2025-09-06 03:38:36] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-06 03:38:36] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-06 03:38:36] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-06 03:38:36] [Rank 0] PRINT: Model returns:
+[2025-09-06 03:38:36] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-06 03:38:36] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-06 03:38:36] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.05).
+[2025-09-06 03:38:36] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-06 03:38:36] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-06 03:38:40] [Rank 0] PRINT: Model compilation complete.
+[2025-09-06 03:38:40] [Rank 0] PRINT: Starting warmup...
+[2025-09-06 03:39:19] [Rank 0] PRINT: Warmup complete.
+[2025-09-06 03:39:19] [Rank 0] PRINT: Starting training...
+[2025-09-06 03:39:26] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/fixed_eval_indices.json
+[2025-09-06 03:39:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
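The divisibility warning above is plain integer arithmetic; a minimal check with the numbers from the log (variable names are illustrative):

    # val_batch_size = world_size * val_seq_len = 65536 in this run
    val_tokens, val_batch_size = 491520, 65536
    val_num_steps = val_tokens // val_batch_size            # 7 full validation passes
    skipped = val_tokens - val_num_steps * val_batch_size   # 32768 tokens never scored

Since 491520 / 65536 = 7.5, the floor division yields 7 validation steps, and the remaining half batch of 32768 tokens is silently dropped, which is exactly what the warning flags.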
+[2025-09-06 03:39:30] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-06 03:40:03] [Rank 0] step:21/10000 train_time:33167ms step_avg:1579.40ms
+[2025-09-06 03:40:04] [Rank 0] step:41/10000 train_time:33896ms step_avg:826.74ms
+[2025-09-06 03:40:04] [Rank 0] step:61/10000 train_time:34624ms step_avg:567.60ms
+[2025-09-06 03:40:05] [Rank 0] step:81/10000 train_time:35352ms step_avg:436.45ms
+[2025-09-06 03:40:06] [Rank 0] step:101/10000 train_time:36081ms step_avg:357.24ms
+[2025-09-06 03:40:06] [Rank 0] step:121/10000 train_time:36811ms step_avg:304.22ms
+[2025-09-06 03:40:07] [Rank 0] step:141/10000 train_time:37539ms step_avg:266.23ms
+[2025-09-06 03:40:08] [Rank 0] step:161/10000 train_time:38268ms step_avg:237.69ms
+[2025-09-06 03:40:09] [Rank 0] step:181/10000 train_time:38996ms step_avg:215.45ms
+[2025-09-06 03:40:09] [Rank 0] step:201/10000 train_time:39725ms step_avg:197.64ms
+[2025-09-06 03:40:10] [Rank 0] step:221/10000 train_time:40454ms step_avg:183.05ms
+[2025-09-06 03:40:11] [Rank 0] step:241/10000 train_time:41182ms step_avg:170.88ms
+[2025-09-06 03:40:12] [Rank 0] step:261/10000 train_time:41911ms step_avg:160.58ms
+[2025-09-06 03:40:12] [Rank 0] step:281/10000 train_time:42641ms step_avg:151.75ms
+[2025-09-06 03:40:13] [Rank 0] step:301/10000 train_time:43511ms step_avg:144.56ms
+[2025-09-06 03:40:14] [Rank 0] step:321/10000 train_time:44240ms step_avg:137.82ms
+[2025-09-06 03:40:15] [Rank 0] step:341/10000 train_time:44969ms step_avg:131.87ms
+[2025-09-06 03:40:16] [Rank 0] step:361/10000 train_time:45853ms step_avg:127.02ms
+[2025-09-06 03:40:16] [Rank 0] step:381/10000 train_time:46581ms step_avg:122.26ms
+[2025-09-06 03:40:17] [Rank 0] step:401/10000 train_time:47311ms step_avg:117.98ms
+[2025-09-06 03:40:18] [Rank 0] step:421/10000 train_time:48040ms step_avg:114.11ms
+[2025-09-06 03:40:18] [Rank 0] step:441/10000 train_time:48769ms step_avg:110.59ms
+[2025-09-06 03:40:19] [Rank 0] step:461/10000 train_time:49498ms step_avg:107.37ms
+[2025-09-06 03:40:20] [Rank 0] step:481/10000 train_time:50227ms step_avg:104.42ms
+[2025-09-06 03:40:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:40:21] [Rank 0] PRINT: step:500/10000 train_loss:6.7734 val_loss:4.9340 train_time:51041ms step_avg:102.08ms
+[2025-09-06 03:40:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:40:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:41:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:41:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:41:44] [Rank 0] Total Loss: 6.5621
+[2025-09-06 03:41:44] [Rank 0] Total FTA (Unweighted): 0.0288
+[2025-09-06 03:41:44] [Rank 0] Total FTA (Weighted): 0.0288
+[2025-09-06 03:41:44] [Rank 0] Group 0 Loss: 4.3914
+[2025-09-06 03:41:44] [Rank 0] Group 1 Loss: 5.1702
+[2025-09-06 03:41:44] [Rank 0] Group 2 Loss: 5.7936
+[2025-09-06 03:41:44] [Rank 0] Group 3 Loss: 6.3719
+[2025-09-06 03:41:44] [Rank 0] Group 4 Loss: 6.7443
+[2025-09-06 03:41:44] [Rank 0] Group 5 Loss: 6.8706
+[2025-09-06 03:41:44] [Rank 0] Group 6 Loss: 6.9176
+[2025-09-06 03:41:44] [Rank 0] Group 7 Loss: 6.8177
+[2025-09-06 03:41:44] [Rank 0] Group 8 Loss: 6.9770
+[2025-09-06 03:41:44] [Rank 0] Group 9 Loss: 7.0307
+[2025-09-06 03:41:44] [Rank 0] Group 10 Loss: 7.0234
+[2025-09-06 03:41:44] [Rank 0] Group 11 Loss: 7.0779
+[2025-09-06 03:41:44] [Rank 0] Group 12 Loss: 6.8923
+[2025-09-06 03:41:44] [Rank 0] Group 13 Loss: 6.9317
+[2025-09-06 03:41:44] [Rank 0] Group 14 Loss: 7.0415
+[2025-09-06 03:41:44] [Rank 0] Group 15 Loss: 6.9424
+[2025-09-06 03:41:44] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 03:41:44] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:41:44] [Rank 0] Group 2 FTA: 0.0000
+[2025-09-06 03:41:44] [Rank 0] Group 3 FTA: 0.0000
+[2025-09-06 03:41:44] [Rank 0] Group 4 FTA: 0.0100
+[2025-09-06 03:41:44] [Rank 0] Group 5 FTA: 0.0000
+[2025-09-06 03:41:44] [Rank 0] Group 6 FTA: 0.0200
+[2025-09-06 03:41:44] [Rank 0] Group 7 FTA: 0.0300
+[2025-09-06 03:41:44] [Rank 0] Group 8 FTA: 0.0400
+[2025-09-06 03:41:44] [Rank 0] Group 9 FTA: 0.0200
+[2025-09-06 03:41:44] [Rank 0] Group 10 FTA: 0.0200
+[2025-09-06 03:41:44] [Rank 0] Group 11 FTA: 0.0400
+[2025-09-06 03:41:44] [Rank 0] Group 12 FTA: 0.0100
+[2025-09-06 03:41:44] [Rank 0] Group 13 FTA: 0.0200
+[2025-09-06 03:41:44] [Rank 0] Group 14 FTA: 0.0100
+[2025-09-06 03:41:44] [Rank 0] Group 15 FTA: 0.0400
+[2025-09-06 03:41:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:41:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:41:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:41:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:41:45] [Rank 0] step:501/10000 train_time:51052ms step_avg:101.90ms
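The "Fixed-eval set loaded with 1600 samples" line above is consistent with the per-group sampling in build_fixed_eval_indices: the run reports sixteen groups (0-15), and each group is capped at PER_GROUP_K samples. A sanity check, assuming the commented-out default of 100 per group:

    num_groups = 16    # group ids 0..15 in the per-group results above
    per_group_k = 100  # assumed; matches the commented-out PER_GROUP_K default
    assert num_groups * per_group_k == 1600  # "Fixed-eval set loaded with 1600 samples."

Groups with fewer than per_group_k parseable QA items would contribute less; here every group evidently had at least 100.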
+[2025-09-06 03:41:46] [Rank 0] step:521/10000 train_time:51713ms step_avg:99.26ms
+[2025-09-06 03:41:47] [Rank 0] step:541/10000 train_time:52441ms step_avg:96.93ms
+[2025-09-06 03:41:47] [Rank 0] step:561/10000 train_time:53170ms step_avg:94.78ms
+[2025-09-06 03:41:48] [Rank 0] step:581/10000 train_time:53899ms step_avg:92.77ms
+[2025-09-06 03:41:49] [Rank 0] step:601/10000 train_time:54628ms step_avg:90.89ms
+[2025-09-06 03:41:49] [Rank 0] step:621/10000 train_time:55356ms step_avg:89.14ms
+[2025-09-06 03:41:50] [Rank 0] step:641/10000 train_time:56088ms step_avg:87.50ms
+[2025-09-06 03:41:51] [Rank 0] step:661/10000 train_time:56815ms step_avg:85.95ms
+[2025-09-06 03:41:52] [Rank 0] step:681/10000 train_time:57543ms step_avg:84.50ms
+[2025-09-06 03:41:52] [Rank 0] step:701/10000 train_time:58271ms step_avg:83.13ms
+[2025-09-06 03:41:53] [Rank 0] step:721/10000 train_time:58999ms step_avg:81.83ms
+[2025-09-06 03:41:54] [Rank 0] step:741/10000 train_time:59726ms step_avg:80.60ms
+[2025-09-06 03:41:55] [Rank 0] step:761/10000 train_time:60459ms step_avg:79.45ms
+[2025-09-06 03:41:55] [Rank 0] step:781/10000 train_time:61192ms step_avg:78.35ms
+[2025-09-06 03:41:56] [Rank 0] step:801/10000 train_time:61925ms step_avg:77.31ms
+[2025-09-06 03:41:57] [Rank 0] step:821/10000 train_time:63265ms step_avg:77.06ms
+[2025-09-06 03:41:58] [Rank 0] step:841/10000 train_time:63999ms step_avg:76.10ms
+[2025-09-06 03:41:59] [Rank 0] step:861/10000 train_time:64733ms step_avg:75.18ms
+[2025-09-06 03:42:00] [Rank 0] step:881/10000 train_time:65466ms step_avg:74.31ms
+[2025-09-06 03:42:00] [Rank 0] step:901/10000 train_time:66201ms step_avg:73.47ms
+[2025-09-06 03:42:01] [Rank 0] step:921/10000 train_time:66935ms step_avg:72.68ms
+[2025-09-06 03:42:02] [Rank 0] step:941/10000 train_time:67669ms step_avg:71.91ms
+[2025-09-06 03:42:03] [Rank 0] step:961/10000 train_time:68403ms step_avg:71.18ms
+[2025-09-06 03:42:03] [Rank 0] step:981/10000 train_time:69137ms step_avg:70.48ms
+[2025-09-06 03:42:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:42:04] [Rank 0] PRINT: step:1000/10000 train_loss:4.4389 val_loss:4.0491 train_time:69951ms step_avg:69.95ms
+[2025-09-06 03:42:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:42:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:43:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:43:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:43:26] [Rank 0] Total Loss: 5.9477
+[2025-09-06 03:43:26] [Rank 0] Total FTA (Unweighted): 0.0838
+[2025-09-06 03:43:26] [Rank 0] Total FTA (Weighted): 0.0838
+[2025-09-06 03:43:26] [Rank 0] Group 0 Loss: 3.6493
+[2025-09-06 03:43:26] [Rank 0] Group 1 Loss: 3.7492
+[2025-09-06 03:43:26] [Rank 0] Group 2 Loss: 4.5609
+[2025-09-06 03:43:26] [Rank 0] Group 3 Loss: 5.3519
+[2025-09-06 03:43:26] [Rank 0] Group 4 Loss: 6.1232
+[2025-09-06 03:43:26] [Rank 0] Group 5 Loss: 6.3218
+[2025-09-06 03:43:26] [Rank 0] Group 6 Loss: 6.4114
+[2025-09-06 03:43:26] [Rank 0] Group 7 Loss: 6.3768
+[2025-09-06 03:43:26] [Rank 0] Group 8 Loss: 6.5128
+[2025-09-06 03:43:26] [Rank 0] Group 9 Loss: 6.6355
+[2025-09-06 03:43:26] [Rank 0] Group 10 Loss: 6.6219
+[2025-09-06 03:43:26] [Rank 0] Group 11 Loss: 6.6881
+[2025-09-06 03:43:26] [Rank 0] Group 12 Loss: 6.5012
+[2025-09-06 03:43:26] [Rank 0] Group 13 Loss: 6.5047
+[2025-09-06 03:43:26] [Rank 0] Group 14 Loss: 6.6303
+[2025-09-06 03:43:26] [Rank 0] Group 15 Loss: 6.5240
+[2025-09-06 03:43:26] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 03:43:26] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:43:26] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-06 03:43:26] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-06 03:43:26] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-06 03:43:26] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-06 03:43:26] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 03:43:26] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-06 03:43:26] [Rank 0] Group 8 FTA: 0.1200
+[2025-09-06 03:43:26] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-06 03:43:26] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-06 03:43:26] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-06 03:43:26] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 03:43:26] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-06 03:43:26] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 03:43:26] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 03:43:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:43:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:43:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:43:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:43:28] [Rank 0] step:1001/10000 train_time:69962ms step_avg:69.89ms
+[2025-09-06 03:43:29] [Rank 0] step:1021/10000 train_time:70621ms step_avg:69.17ms
+[2025-09-06 03:43:30] [Rank 0] step:1041/10000 train_time:71355ms step_avg:68.54ms
train_time:71355ms step_avg:68.54ms +[2025-09-06 03:43:30] [Rank 0] step:1041/10000 train_time:71355ms step_avg:68.54ms +[2025-09-06 03:43:31] [Rank 0] step:1061/10000 train_time:72089ms step_avg:67.94ms +[2025-09-06 03:43:31] [Rank 0] step:1061/10000 train_time:72089ms step_avg:67.94ms +[2025-09-06 03:43:31] [Rank 0] step:1081/10000 train_time:72823ms step_avg:67.37ms +[2025-09-06 03:43:31] [Rank 0] step:1081/10000 train_time:72823ms step_avg:67.37ms +[2025-09-06 03:43:32] [Rank 0] step:1101/10000 train_time:73557ms step_avg:66.81ms +[2025-09-06 03:43:32] [Rank 0] step:1101/10000 train_time:73557ms step_avg:66.81ms +[2025-09-06 03:43:33] [Rank 0] step:1121/10000 train_time:74291ms step_avg:66.27ms +[2025-09-06 03:43:33] [Rank 0] step:1121/10000 train_time:74291ms step_avg:66.27ms +[2025-09-06 03:43:34] [Rank 0] step:1141/10000 train_time:75024ms step_avg:65.75ms +[2025-09-06 03:43:34] [Rank 0] step:1141/10000 train_time:75024ms step_avg:65.75ms +[2025-09-06 03:43:34] [Rank 0] step:1161/10000 train_time:75757ms step_avg:65.25ms +[2025-09-06 03:43:34] [Rank 0] step:1161/10000 train_time:75757ms step_avg:65.25ms +[2025-09-06 03:43:35] [Rank 0] step:1181/10000 train_time:76491ms step_avg:64.77ms +[2025-09-06 03:43:35] [Rank 0] step:1181/10000 train_time:76491ms step_avg:64.77ms +[2025-09-06 03:43:36] [Rank 0] step:1201/10000 train_time:77225ms step_avg:64.30ms +[2025-09-06 03:43:36] [Rank 0] step:1201/10000 train_time:77225ms step_avg:64.30ms +[2025-09-06 03:43:37] [Rank 0] step:1221/10000 train_time:77959ms step_avg:63.85ms +[2025-09-06 03:43:37] [Rank 0] step:1221/10000 train_time:77959ms step_avg:63.85ms +[2025-09-06 03:43:37] [Rank 0] step:1241/10000 train_time:78692ms step_avg:63.41ms +[2025-09-06 03:43:37] [Rank 0] step:1241/10000 train_time:78692ms step_avg:63.41ms +[2025-09-06 03:43:38] [Rank 0] step:1261/10000 train_time:79426ms step_avg:62.99ms +[2025-09-06 03:43:38] [Rank 0] step:1261/10000 train_time:79426ms step_avg:62.99ms +[2025-09-06 03:43:39] [Rank 0] step:1281/10000 train_time:80160ms step_avg:62.58ms +[2025-09-06 03:43:39] [Rank 0] step:1281/10000 train_time:80160ms step_avg:62.58ms +[2025-09-06 03:43:39] [Rank 0] step:1301/10000 train_time:80894ms step_avg:62.18ms +[2025-09-06 03:43:39] [Rank 0] step:1301/10000 train_time:80894ms step_avg:62.18ms +[2025-09-06 03:43:40] [Rank 0] step:1321/10000 train_time:81627ms step_avg:61.79ms +[2025-09-06 03:43:40] [Rank 0] step:1321/10000 train_time:81627ms step_avg:61.79ms +[2025-09-06 03:43:41] [Rank 0] step:1341/10000 train_time:82360ms step_avg:61.42ms +[2025-09-06 03:43:41] [Rank 0] step:1341/10000 train_time:82360ms step_avg:61.42ms +[2025-09-06 03:43:42] [Rank 0] step:1361/10000 train_time:83094ms step_avg:61.05ms +[2025-09-06 03:43:42] [Rank 0] step:1361/10000 train_time:83094ms step_avg:61.05ms +[2025-09-06 03:43:42] [Rank 0] step:1381/10000 train_time:83827ms step_avg:60.70ms +[2025-09-06 03:43:42] [Rank 0] step:1381/10000 train_time:83827ms step_avg:60.70ms +[2025-09-06 03:43:43] [Rank 0] step:1401/10000 train_time:84561ms step_avg:60.36ms +[2025-09-06 03:43:43] [Rank 0] step:1401/10000 train_time:84561ms step_avg:60.36ms +[2025-09-06 03:43:44] [Rank 0] step:1421/10000 train_time:85294ms step_avg:60.02ms +[2025-09-06 03:43:44] [Rank 0] step:1421/10000 train_time:85294ms step_avg:60.02ms +[2025-09-06 03:43:45] [Rank 0] step:1441/10000 train_time:86029ms step_avg:59.70ms +[2025-09-06 03:43:45] [Rank 0] step:1441/10000 train_time:86029ms step_avg:59.70ms +[2025-09-06 03:43:45] [Rank 0] step:1461/10000 train_time:86762ms 
step_avg:59.39ms +[2025-09-06 03:43:45] [Rank 0] step:1461/10000 train_time:86762ms step_avg:59.39ms +[2025-09-06 03:43:46] [Rank 0] step:1481/10000 train_time:87496ms step_avg:59.08ms +[2025-09-06 03:43:46] [Rank 0] step:1481/10000 train_time:87496ms step_avg:59.08ms +[2025-09-06 03:43:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:43:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 03:43:47] [Rank 0] PRINT: step:1500/10000 train_loss:3.8350 val_loss:3.6386 train_time:88310ms step_avg:58.87ms +[2025-09-06 03:43:47] [Rank 0] PRINT: step:1500/10000 train_loss:3.8350 val_loss:3.6386 train_time:88310ms step_avg:58.87ms +[2025-09-06 03:43:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:43:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 03:43:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:43:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 03:45:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:45:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 03:45:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:45:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 03:45:09] [Rank 0] Total Loss: 5.7131 +[2025-09-06 03:45:09] [Rank 0] Total Loss: 5.7131 +[2025-09-06 03:45:09] [Rank 0] Total FTA (Unweighted): 0.0944 +[2025-09-06 03:45:09] [Rank 0] Total FTA (Unweighted): 0.0944 +[2025-09-06 03:45:09] [Rank 0] Total FTA (Weighted): 0.0944 +[2025-09-06 03:45:09] [Rank 0] Total FTA (Weighted): 0.0944 +[2025-09-06 03:45:09] [Rank 0] Group 0 Loss: 3.4963 +[2025-09-06 03:45:09] [Rank 0] Group 0 Loss: 3.4963 +[2025-09-06 03:45:09] [Rank 0] Group 1 Loss: 3.5013 +[2025-09-06 03:45:09] [Rank 0] Group 1 Loss: 3.5013 +[2025-09-06 03:45:09] [Rank 0] Group 2 Loss: 4.0671 +[2025-09-06 03:45:09] [Rank 0] Group 2 Loss: 4.0671 +[2025-09-06 03:45:09] [Rank 0] Group 3 Loss: 4.8682 +[2025-09-06 03:45:09] [Rank 0] Group 3 Loss: 4.8682 +[2025-09-06 03:45:09] [Rank 0] Group 4 Loss: 5.7389 +[2025-09-06 03:45:09] [Rank 0] Group 4 Loss: 5.7389 +[2025-09-06 03:45:09] [Rank 0] Group 5 Loss: 6.0107 +[2025-09-06 03:45:09] [Rank 0] Group 5 Loss: 6.0107 +[2025-09-06 03:45:09] [Rank 0] Group 6 Loss: 6.1679 +[2025-09-06 03:45:09] [Rank 0] Group 6 Loss: 6.1679 +[2025-09-06 03:45:09] [Rank 0] Group 7 Loss: 6.1672 +[2025-09-06 03:45:09] [Rank 0] Group 7 Loss: 6.1672 +[2025-09-06 03:45:09] [Rank 0] Group 8 Loss: 6.3420 +[2025-09-06 03:45:09] [Rank 0] Group 8 Loss: 6.3420 +[2025-09-06 03:45:09] [Rank 0] Group 9 Loss: 6.4890 +[2025-09-06 03:45:09] [Rank 0] Group 9 Loss: 6.4890 +[2025-09-06 03:45:09] [Rank 0] Group 10 Loss: 6.4639 +[2025-09-06 03:45:09] [Rank 0] Group 10 Loss: 6.4639 +[2025-09-06 03:45:09] [Rank 0] Group 11 Loss: 6.5344 +[2025-09-06 03:45:09] [Rank 0] Group 11 Loss: 6.5344 +[2025-09-06 03:45:09] [Rank 0] Group 12 Loss: 6.3506 +[2025-09-06 03:45:09] [Rank 0] Group 12 Loss: 6.3506 +[2025-09-06 03:45:09] [Rank 0] Group 13 Loss: 6.3612 +[2025-09-06 03:45:09] [Rank 0] Group 13 Loss: 6.3612 +[2025-09-06 03:45:09] [Rank 0] Group 14 Loss: 6.4660 +[2025-09-06 03:45:09] [Rank 0] Group 14 Loss: 6.4660 +[2025-09-06 03:45:09] [Rank 0] Group 15 Loss: 6.3851 +[2025-09-06 03:45:09] [Rank 0] Group 15 Loss: 6.3851 +[2025-09-06 03:45:09] [Rank 0] Group 0 FTA: 0.0000 
+[2025-09-06 03:45:09] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:45:09] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:45:09] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-06 03:45:09] [Rank 0] Group 4 FTA: 0.0500
+[2025-09-06 03:45:09] [Rank 0] Group 5 FTA: 0.0900
+[2025-09-06 03:45:09] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 03:45:09] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-06 03:45:09] [Rank 0] Group 8 FTA: 0.1200
+[2025-09-06 03:45:09] [Rank 0] Group 9 FTA: 0.0900
+[2025-09-06 03:45:09] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-06 03:45:09] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:45:09] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 03:45:09] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-06 03:45:09] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 03:45:09] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 03:45:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:45:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:45:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:45:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:45:10] [Rank 0] step:1501/10000 train_time:88320ms step_avg:58.84ms
+[2025-09-06 03:45:11] [Rank 0] step:1521/10000 train_time:88993ms step_avg:58.51ms
+[2025-09-06 03:45:12] [Rank 0] step:1541/10000 train_time:89725ms step_avg:58.23ms
+[2025-09-06 03:45:13] [Rank 0] step:1561/10000 train_time:90458ms step_avg:57.95ms
+[2025-09-06 03:45:13] [Rank 0] step:1581/10000 train_time:91192ms step_avg:57.68ms
+[2025-09-06 03:45:14] [Rank 0] step:1601/10000 train_time:91924ms step_avg:57.42ms
+[2025-09-06 03:45:15] [Rank 0] step:1621/10000 train_time:92657ms step_avg:57.16ms
+[2025-09-06 03:45:16] [Rank 0] step:1641/10000 train_time:94002ms step_avg:57.28ms
+[2025-09-06 03:45:17] [Rank 0] step:1661/10000 train_time:94737ms step_avg:57.04ms
+[2025-09-06 03:45:18] [Rank 0] step:1681/10000 train_time:95472ms step_avg:56.79ms
+[2025-09-06 03:45:18] [Rank 0] step:1701/10000 train_time:96206ms step_avg:56.56ms
+[2025-09-06 03:45:19] [Rank 0] step:1721/10000 train_time:96938ms step_avg:56.33ms
+[2025-09-06 03:45:20] [Rank 0] step:1741/10000 train_time:97672ms step_avg:56.10ms
+[2025-09-06 03:45:21] [Rank 0] step:1761/10000 train_time:98406ms step_avg:55.88ms
+[2025-09-06 03:45:21] [Rank 0] step:1781/10000 train_time:99141ms step_avg:55.67ms
+[2025-09-06 03:45:22] [Rank 0] step:1801/10000 train_time:99875ms step_avg:55.46ms
+[2025-09-06 03:45:23] [Rank 0] step:1821/10000 train_time:100609ms step_avg:55.25ms
+[2025-09-06 03:45:24] [Rank 0] step:1841/10000 train_time:101342ms step_avg:55.05ms
+[2025-09-06 03:45:24] [Rank 0] step:1861/10000 train_time:102075ms step_avg:54.85ms
+[2025-09-06 03:45:25] [Rank 0] step:1881/10000 train_time:102810ms step_avg:54.66ms
+[2025-09-06 03:45:26] [Rank 0] step:1901/10000 train_time:103544ms step_avg:54.47ms
+[2025-09-06 03:45:27] [Rank 0] step:1921/10000 train_time:104278ms step_avg:54.28ms
+[2025-09-06 03:45:27] [Rank 0] step:1941/10000 train_time:105012ms step_avg:54.10ms
+[2025-09-06 03:45:28] [Rank 0] step:1961/10000 train_time:105745ms step_avg:53.92ms
+[2025-09-06 03:45:29] [Rank 0] step:1981/10000 train_time:106479ms step_avg:53.75ms
+[2025-09-06 03:45:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:45:30] [Rank 0] PRINT: step:2000/10000 train_loss:3.5075 val_loss:3.3836 train_time:107436ms step_avg:53.72ms
+[2025-09-06 03:45:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:45:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:46:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:46:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:46:52] [Rank 0] Total Loss: 5.4587
+[2025-09-06 03:46:52] [Rank 0] Total FTA (Unweighted): 0.1225
+[2025-09-06 03:46:52] [Rank 0] Total FTA (Weighted): 0.1225
+[2025-09-06 03:46:52] [Rank 0] Group 0 Loss: 3.2980
+[2025-09-06 03:46:52] [Rank 0] Group 1 Loss: 3.3891
+[2025-09-06 03:46:52] [Rank 0] Group 2 Loss: 3.7178
+[2025-09-06 03:46:52] [Rank 0] Group 3 Loss: 4.4396
+[2025-09-06 03:46:52] [Rank 0] Group 4 Loss: 5.3426
+[2025-09-06 03:46:52] [Rank 0] Group 5 Loss: 5.7096
+[2025-09-06 03:46:52] [Rank 0] Group 6 Loss: 5.9093
+[2025-09-06 03:46:52] [Rank 0] Group 7 Loss: 5.9270
+[2025-09-06 03:46:52] [Rank 0] Group 8 Loss: 6.1151
+[2025-09-06 03:46:52] [Rank 0] Group 9 Loss: 6.2668
+[2025-09-06 03:46:52] [Rank 0] Group 10 Loss: 6.2531
+[2025-09-06 03:46:52] [Rank 0] Group 11 Loss: 6.3132
+[2025-09-06 03:46:52] [Rank 0] Group 12 Loss: 6.1364
+[2025-09-06 03:46:52] [Rank 0] Group 13 Loss: 6.1266
+[2025-09-06 03:46:53] [Rank 0] Group 14 Loss: 6.2410
+[2025-09-06 03:46:53] [Rank 0] Group 15 Loss: 6.1536
+[2025-09-06 03:46:53] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-06 03:46:53] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:46:53] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:46:53] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:46:53] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 03:46:53] [Rank 0] Group 5 FTA: 0.1200
+[2025-09-06 03:46:53] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-06 03:46:53] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-06 03:46:53] [Rank 0] Group 8 FTA: 0.1300
+[2025-09-06 03:46:53] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-06 03:46:53] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-06 03:46:53] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:46:53] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 03:46:53] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 03:46:53] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 03:46:53] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 03:46:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:46:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:46:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:46:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:46:54] [Rank 0] step:2001/10000 train_time:107447ms step_avg:53.70ms
+[2025-09-06 03:46:55] [Rank 0] step:2021/10000 train_time:108315ms step_avg:53.59ms
+[2025-09-06 03:46:56] [Rank 0] step:2041/10000 train_time:109049ms step_avg:53.43ms
+[2025-09-06 03:46:56] [Rank 0] step:2061/10000 train_time:109783ms step_avg:53.27ms
+[2025-09-06 03:46:57] [Rank 0] step:2081/10000 train_time:110516ms step_avg:53.11ms
+[2025-09-06 03:46:58] [Rank 0] step:2101/10000 train_time:111250ms step_avg:52.95ms
+[2025-09-06 03:46:59] [Rank 0] step:2121/10000 train_time:111983ms step_avg:52.80ms
+[2025-09-06 03:46:59] [Rank 0] step:2141/10000 train_time:112717ms step_avg:52.65ms
+[2025-09-06 03:47:00] [Rank 0] step:2161/10000 train_time:113451ms step_avg:52.50ms
+[2025-09-06 03:47:01] [Rank 0] step:2181/10000 train_time:114186ms step_avg:52.36ms
+[2025-09-06 03:47:02] [Rank 0] step:2201/10000 train_time:114919ms step_avg:52.21ms
+[2025-09-06 03:47:02] [Rank 0] step:2221/10000 train_time:115652ms step_avg:52.07ms
+[2025-09-06 03:47:03] [Rank 0] step:2241/10000 train_time:116391ms step_avg:51.94ms
+[2025-09-06 03:47:04] [Rank 0] step:2261/10000 train_time:117131ms step_avg:51.80ms
+[2025-09-06 03:47:05] [Rank 0] step:2281/10000 train_time:117870ms step_avg:51.67ms
+[2025-09-06 03:47:05] [Rank 0] step:2301/10000 train_time:118610ms step_avg:51.55ms
+[2025-09-06 03:47:06] [Rank 0] step:2321/10000 train_time:119351ms step_avg:51.42ms
+[2025-09-06 03:47:07] [Rank 0] step:2341/10000 train_time:120091ms step_avg:51.30ms
+[2025-09-06 03:47:07] [Rank 0] step:2361/10000 train_time:120832ms step_avg:51.18ms
+[2025-09-06 03:47:08] [Rank 0] step:2381/10000 train_time:121572ms step_avg:51.06ms
+[2025-09-06 03:47:09] [Rank 0] step:2401/10000 train_time:122312ms step_avg:50.94ms
+[2025-09-06 03:47:10] [Rank 0] step:2421/10000 train_time:123052ms step_avg:50.83ms
+[2025-09-06 03:47:10] [Rank 0] step:2441/10000 train_time:123792ms step_avg:50.71ms
+[2025-09-06 03:47:11] [Rank 0] step:2461/10000 train_time:124532ms step_avg:50.60ms
+[2025-09-06 03:47:12] [Rank 0] step:2481/10000 train_time:125271ms step_avg:50.49ms
+[2025-09-06 03:47:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:47:13] [Rank 0] PRINT: step:2500/10000 train_loss:3.2966 val_loss:3.1993 train_time:126091ms step_avg:50.44ms
+[2025-09-06 03:47:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:47:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:48:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:48:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:48:35] [Rank 0] Total Loss: 5.3992
+[2025-09-06 03:48:35] [Rank 0] Total FTA (Unweighted): 0.1300
+[2025-09-06 03:48:35] [Rank 0] Total FTA (Weighted): 0.1300
+[2025-09-06 03:48:35] [Rank 0] Group 0 Loss: 3.3156
+[2025-09-06 03:48:35] [Rank 0] Group 1 Loss: 3.3752
+[2025-09-06 03:48:35] [Rank 0] Group 2 Loss: 3.6548
+[2025-09-06 03:48:36] [Rank 0] Group 3 Loss: 4.3268
+[2025-09-06 03:48:36] [Rank 0] Group 4 Loss: 5.1772
+[2025-09-06 03:48:36] [Rank 0] Group 5 Loss: 5.5728
+[2025-09-06 03:48:36] [Rank 0] Group 6 Loss: 5.8202
+[2025-09-06 03:48:36] [Rank 0] Group 7 Loss: 5.8530
+[2025-09-06 03:48:36] [Rank 0] Group 8 Loss: 6.0549
+[2025-09-06 03:48:36] [Rank 0] Group 9 Loss: 6.2339
+[2025-09-06 03:48:36] [Rank 0] Group 10 Loss: 6.1887
+[2025-09-06 03:48:36] [Rank 0] Group 11 Loss: 6.2715
+[2025-09-06 03:48:36] [Rank 0] Group 12 Loss: 6.0932
+[2025-09-06 03:48:36] [Rank 0] Group 13 Loss: 6.1149
+[2025-09-06 03:48:36] [Rank 0] Group 14 Loss: 6.2185
+[2025-09-06 03:48:36] [Rank 0] Group 15 Loss: 6.1164
+[2025-09-06 03:48:36] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-06 03:48:36] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:48:36] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:48:36] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:48:36] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 03:48:36] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-06 03:48:36] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-06 03:48:36] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:48:36] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-06 03:48:36] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:48:36] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-06 03:48:36] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:48:36] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:48:36] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 03:48:36] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 03:48:36] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 03:48:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:48:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:48:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:48:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:48:37] [Rank 0] step:2501/10000 train_time:126102ms step_avg:50.42ms
+[2025-09-06 03:48:38] [Rank 0] step:2521/10000 train_time:126787ms step_avg:50.29ms
+[2025-09-06 03:48:39] [Rank 0] step:2541/10000 train_time:127527ms step_avg:50.19ms
+[2025-09-06 03:48:39] [Rank 0] step:2561/10000 train_time:128379ms step_avg:50.13ms
+[2025-09-06 03:48:40] [Rank 0] step:2581/10000 train_time:129119ms step_avg:50.03ms
+[2025-09-06 03:48:41] [Rank 0] step:2601/10000 train_time:129859ms step_avg:49.93ms
+[2025-09-06 03:48:42] [Rank 0] step:2621/10000 train_time:130713ms step_avg:49.87ms
+[2025-09-06 03:48:43] [Rank 0] step:2641/10000 train_time:131453ms step_avg:49.77ms
+[2025-09-06 03:48:43] [Rank 0] step:2661/10000 train_time:132193ms step_avg:49.68ms
+[2025-09-06 03:48:44] [Rank 0] step:2681/10000 train_time:132933ms step_avg:49.58ms
+[2025-09-06 03:48:45] [Rank 0] step:2701/10000 train_time:133672ms step_avg:49.49ms
+[2025-09-06 03:48:45] [Rank 0] step:2721/10000 train_time:134412ms step_avg:49.40ms
+[2025-09-06 03:48:46] [Rank 0] step:2741/10000 train_time:135152ms step_avg:49.31ms
+[2025-09-06 03:48:47] [Rank 0] step:2761/10000 train_time:135892ms step_avg:49.22ms
+[2025-09-06 03:48:48] [Rank 0] step:2781/10000 train_time:136634ms step_avg:49.13ms
+[2025-09-06 03:48:48] [Rank 0] step:2801/10000 train_time:137374ms step_avg:49.04ms
+[2025-09-06 03:48:50] [Rank 0] step:2821/10000 train_time:138723ms step_avg:49.18ms
+[2025-09-06 03:48:51] [Rank 0] step:2841/10000 train_time:139464ms step_avg:49.09ms
+[2025-09-06 03:48:51] [Rank 0] step:2861/10000 train_time:140204ms step_avg:49.01ms
+[2025-09-06 03:48:52] [Rank 0] step:2881/10000 train_time:140944ms step_avg:48.92ms
+[2025-09-06 03:48:53] [Rank 0] step:2901/10000 train_time:141683ms step_avg:48.84ms
+[2025-09-06 03:48:53] [Rank 0] step:2921/10000 train_time:142423ms step_avg:48.76ms
+[2025-09-06 03:48:54] [Rank 0] step:2941/10000 train_time:143162ms step_avg:48.68ms
+[2025-09-06 03:48:55] [Rank 0] step:2961/10000 train_time:143902ms step_avg:48.60ms
+[2025-09-06 03:48:56] [Rank 0] step:2981/10000 train_time:144642ms step_avg:48.52ms
+[2025-09-06 03:48:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:48:57] [Rank 0] PRINT: step:3000/10000 train_loss:3.1387 val_loss:3.0684 train_time:145463ms step_avg:48.49ms
+[2025-09-06 03:48:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:48:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:50:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:50:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:50:19] [Rank 0] Total Loss: 5.3034
+[2025-09-06 03:50:19] [Rank 0] Total FTA (Unweighted): 0.1400
+[2025-09-06 03:50:19] [Rank 0] Total FTA (Weighted): 0.1400
+[2025-09-06 03:50:19] [Rank 0] Group 0 Loss: 3.2409
+[2025-09-06 03:50:19] [Rank 0] Group 1 Loss: 3.3263
+[2025-09-06 03:50:19] [Rank 0] Group 2 Loss: 3.5546
+[2025-09-06 03:50:19] [Rank 0] Group 3 Loss: 4.1620
+[2025-09-06 03:50:19] [Rank 0] Group 4 Loss: 4.9880
+[2025-09-06 03:50:19] [Rank 0] Group 5 Loss: 5.4316
+[2025-09-06 03:50:19] [Rank 0] Group 6 Loss: 5.6912
+[2025-09-06 03:50:19] [Rank 0] Group 7 Loss: 5.7568
+[2025-09-06 03:50:19] [Rank 0] Group 8 Loss: 5.9707
+[2025-09-06 03:50:19] [Rank 0] Group 9 Loss: 6.1583
+[2025-09-06 03:50:19] [Rank 0] Group 10 Loss: 6.1004
+[2025-09-06 03:50:19] [Rank 0] Group 11 Loss: 6.1966
+[2025-09-06 03:50:19] [Rank 0] Group 12 Loss: 6.0306
+[2025-09-06 03:50:19] [Rank 0] Group 13 Loss: 6.0458
+[2025-09-06 03:50:19] [Rank 0] Group 14 Loss: 6.1439
+[2025-09-06 03:50:19] [Rank 0] Group 15 Loss: 6.0575
+[2025-09-06 03:50:19] [Rank 0] Group 0 FTA: 0.4000
+[2025-09-06 03:50:19] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:50:19] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:50:19] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:50:19] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 03:50:19] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:50:19] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-06 03:50:19] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:50:19] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-06 03:50:19] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:50:19] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-06 03:50:20] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:50:20] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:50:20] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 03:50:20] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 03:50:20] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 03:50:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:50:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:50:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:50:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:50:21] [Rank 0] step:3001/10000 train_time:145474ms step_avg:48.48ms
+[2025-09-06 03:50:22] [Rank 0] step:3021/10000 train_time:146148ms step_avg:48.38ms
+[2025-09-06 03:50:22] [Rank 0] step:3041/10000 train_time:146886ms step_avg:48.30ms
+[2025-09-06 03:50:23] [Rank 0] step:3061/10000 train_time:147625ms step_avg:48.23ms
+[2025-09-06 03:50:24] [Rank 0] step:3081/10000 train_time:148364ms step_avg:48.15ms
+[2025-09-06 03:50:25] [Rank 0] step:3101/10000 train_time:149103ms step_avg:48.08ms
+[2025-09-06 03:50:25] [Rank 0] step:3121/10000 train_time:149842ms step_avg:48.01ms
+[2025-09-06 03:50:26] [Rank 0] step:3141/10000 train_time:150581ms step_avg:47.94ms
+[2025-09-06 03:50:27] [Rank 0] step:3161/10000 train_time:151320ms step_avg:47.87ms
+[2025-09-06 03:50:28] [Rank 0] step:3181/10000 train_time:152060ms step_avg:47.80ms
+[2025-09-06 03:50:28] [Rank 0] step:3201/10000 train_time:152800ms step_avg:47.73ms
+[2025-09-06 03:50:29] [Rank 0] step:3221/10000 train_time:153539ms step_avg:47.67ms
+[2025-09-06 03:50:30] [Rank 0] step:3241/10000 train_time:154278ms step_avg:47.60ms
+[2025-09-06 03:50:31] [Rank 0] step:3261/10000 train_time:155018ms step_avg:47.54ms
+[2025-09-06 03:50:31] [Rank 0] step:3281/10000 train_time:155762ms step_avg:47.47ms
+[2025-09-06 03:50:32] [Rank 0] step:3301/10000 train_time:156502ms step_avg:47.41ms
+[2025-09-06 03:50:33] [Rank 0] step:3321/10000 train_time:157241ms step_avg:47.35ms
+[2025-09-06 03:50:33] [Rank 0] step:3341/10000 train_time:157979ms step_avg:47.28ms
+[2025-09-06 03:50:34] [Rank 0] step:3361/10000 train_time:158719ms step_avg:47.22ms
+[2025-09-06 03:50:35] [Rank 0] step:3381/10000 train_time:159458ms step_avg:47.16ms
+[2025-09-06 03:50:36] [Rank 0] step:3401/10000 train_time:160198ms step_avg:47.10ms
+[2025-09-06 03:50:36] [Rank 0] step:3421/10000 train_time:160938ms step_avg:47.04ms
+[2025-09-06 03:50:37] [Rank 0] step:3441/10000 train_time:161677ms step_avg:46.99ms
+[2025-09-06 03:50:38] [Rank 0] step:3461/10000 train_time:162416ms step_avg:46.93ms
+[2025-09-06 03:50:39] [Rank 0] step:3481/10000 train_time:163155ms step_avg:46.87ms
+[2025-09-06 03:50:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:50:40] [Rank 0] PRINT: step:3500/10000 train_loss:3.0270 val_loss:2.9660 train_time:163975ms step_avg:46.85ms
+[2025-09-06 03:50:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:50:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:52:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:52:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:52:02] [Rank 0] Total Loss: 5.2277
+[2025-09-06 03:52:02] [Rank 0] Total FTA (Unweighted): 0.1688
+[2025-09-06 03:52:02] [Rank 0] Total FTA (Weighted): 0.1688
+[2025-09-06 03:52:02] [Rank 0] Group 0 Loss: 3.2670
+[2025-09-06 03:52:02] [Rank 0] Group 1 Loss: 3.2690
+[2025-09-06 03:52:02] [Rank 0] Group 2 Loss: 3.4809
+[2025-09-06 03:52:02] [Rank 0] Group 3 Loss: 4.0370
+[2025-09-06 03:52:02] [Rank 0] Group 4 Loss: 4.8784
+[2025-09-06 03:52:02] [Rank 0] Group 5 Loss: 5.3717
+[2025-09-06 03:52:02] [Rank 0] Group 6 Loss: 5.6090
+[2025-09-06 03:52:02] [Rank 0] Group 7 Loss: 5.6565
+[2025-09-06 03:52:02] [Rank 0] Group 8 Loss: 5.8793
+[2025-09-06 03:52:02] [Rank 0] Group 9 Loss: 6.0432
+[2025-09-06 03:52:02] [Rank 0] Group 10 Loss: 6.0179
+[2025-09-06 03:52:02] [Rank 0] Group 11 Loss: 6.1360
+[2025-09-06 03:52:02] [Rank 0] Group 12 Loss: 5.9656
+[2025-09-06 03:52:02] [Rank 0] Group 13 Loss: 5.9930
+[2025-09-06 03:52:02] [Rank 0] Group 14 Loss: 6.0571
+[2025-09-06 03:52:02] [Rank 0] Group 15 Loss: 5.9814
+[2025-09-06 03:52:02] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 03:52:02] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:52:02] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:52:02] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:52:02] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 03:52:02] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:52:02] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-06 03:52:02] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:52:02] [Rank 0] Group 8 FTA: 0.1600
+[2025-09-06 03:52:02] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:52:02] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-06 03:52:02] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:52:02] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:52:02] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 03:52:02] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 03:52:02] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 03:52:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:52:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:52:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:52:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:52:03] [Rank 0] step:3501/10000 train_time:163986ms step_avg:46.84ms
+[2025-09-06 03:52:04] [Rank 0] step:3521/10000 train_time:164649ms step_avg:46.76ms
+[2025-09-06 03:52:05] [Rank 0] step:3541/10000 train_time:165388ms step_avg:46.71ms
+[2025-09-06 03:52:06] [Rank 0] step:3561/10000 train_time:166128ms step_avg:46.65ms
+[2025-09-06 03:52:06] [Rank 0] step:3581/10000 train_time:166867ms step_avg:46.60ms
+[2025-09-06 03:52:07] [Rank 0] step:3601/10000 train_time:167606ms step_avg:46.54ms
+[2025-09-06 03:52:08] [Rank 0] step:3621/10000 train_time:168346ms step_avg:46.49ms
+[2025-09-06 03:52:09] [Rank 0] step:3641/10000 train_time:169699ms step_avg:46.61ms
+[2025-09-06 03:52:10] [Rank 0] step:3661/10000 train_time:170438ms step_avg:46.56ms
+[2025-09-06 03:52:11] [Rank 0] step:3681/10000 train_time:171177ms step_avg:46.50ms
+[2025-09-06 03:52:11] [Rank 0] step:3701/10000 train_time:171916ms step_avg:46.45ms
+[2025-09-06 03:52:12] [Rank 0] step:3721/10000 train_time:172655ms step_avg:46.40ms
+[2025-09-06 03:52:13] [Rank 0] step:3741/10000 train_time:173394ms step_avg:46.35ms
+[2025-09-06 03:52:14] [Rank 0] step:3761/10000 train_time:174134ms step_avg:46.30ms
+[2025-09-06 03:52:14] [Rank 0] step:3781/10000 train_time:174873ms step_avg:46.25ms
+[2025-09-06 03:52:15] [Rank 0] step:3801/10000 train_time:175612ms step_avg:46.20ms
+[2025-09-06 03:52:16] [Rank 0] step:3821/10000 train_time:176352ms step_avg:46.15ms
+[2025-09-06 03:52:16] [Rank 0] step:3841/10000 train_time:177093ms step_avg:46.11ms
+[2025-09-06 03:52:17] [Rank 0] step:3861/10000 train_time:177833ms step_avg:46.06ms
+[2025-09-06 03:52:18] [Rank 0] step:3881/10000 train_time:178573ms step_avg:46.01ms
+[2025-09-06 03:52:19] [Rank 0] step:3901/10000 train_time:179311ms step_avg:45.97ms
+[2025-09-06 03:52:19] [Rank 0] step:3921/10000 train_time:180049ms step_avg:45.92ms
+[2025-09-06 03:52:20] [Rank 0] step:3941/10000 train_time:180788ms step_avg:45.87ms
+[2025-09-06 03:52:21] [Rank 0] step:3961/10000 train_time:181528ms step_avg:45.83ms
+[2025-09-06 03:52:22] [Rank 0] step:3981/10000 train_time:182268ms step_avg:45.78ms
+[2025-09-06 03:52:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:52:23] [Rank 0] PRINT: step:4000/10000 train_loss:2.9341 val_loss:2.8865 train_time:183089ms step_avg:45.77ms
+[2025-09-06 03:52:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:52:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:53:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:53:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:53:45] [Rank 0] Total Loss: 5.1664
+[2025-09-06 03:53:45] [Rank 0] Total FTA (Unweighted): 0.1725
+[2025-09-06 03:53:45] [Rank 0] Total FTA (Weighted): 0.1725
+[2025-09-06 03:53:45] [Rank 0] Group 0 Loss: 3.2533
+[2025-09-06 03:53:45] [Rank 0] Group 1 Loss: 3.3220
+[2025-09-06 03:53:45] [Rank 0] Group 2 Loss: 3.4697
+[2025-09-06 03:53:45] [Rank 0] Group 3 Loss: 4.0006
+[2025-09-06 03:53:45] [Rank 0] Group 4 Loss: 4.7662
+[2025-09-06 03:53:45] [Rank 0] Group 5 Loss: 5.2448
+[2025-09-06 03:53:45] [Rank 0] Group 6 Loss: 5.5165
+[2025-09-06 03:53:45] [Rank 0] Group 7 Loss: 5.5985
+[2025-09-06 03:53:45] [Rank 0] Group 8 Loss: 5.8237
+[2025-09-06 03:53:45] [Rank 0] Group 9 Loss: 5.9680
+[2025-09-06 03:53:45] [Rank 0] Group 10 Loss: 5.9465
+[2025-09-06 03:53:45] [Rank 0] Group 11 Loss: 6.0368
+[2025-09-06 03:53:45] [Rank 0] Group 12 Loss: 5.9031
+[2025-09-06 03:53:45] [Rank 0] Group 13 Loss: 5.9144
+[2025-09-06 03:53:45] [Rank 0] Group 14 Loss: 5.9768
+[2025-09-06 03:53:45] [Rank 0] Group 15 Loss: 5.9217
+[2025-09-06 03:53:45] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 03:53:45] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:53:46] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:53:46] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:53:46] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 03:53:46] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:53:46] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 03:53:46] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:53:46] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 03:53:46] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:53:46] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-06 03:53:46] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:53:46] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:53:46] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 03:53:46] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 03:53:46] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 03:53:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:53:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:53:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:53:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:53:47] [Rank 0] step:4001/10000 train_time:183100ms step_avg:45.76ms
+[2025-09-06 03:53:48] [Rank 0] step:4021/10000 train_time:183966ms step_avg:45.75ms
+[2025-09-06 03:53:49] [Rank 0] step:4041/10000 train_time:184706ms step_avg:45.71ms
+[2025-09-06 03:53:49] [Rank 0] step:4061/10000 train_time:185446ms step_avg:45.67ms
+[2025-09-06 03:53:50] [Rank 0] step:4081/10000 train_time:186186ms step_avg:45.62ms
+[2025-09-06 03:53:51] [Rank 0] step:4101/10000 train_time:186925ms step_avg:45.58ms
+[2025-09-06 03:53:52] [Rank 0] step:4121/10000 train_time:187665ms step_avg:45.54ms
+[2025-09-06 03:53:52] [Rank 0] step:4141/10000 train_time:188404ms step_avg:45.50ms
+[2025-09-06 03:53:53] [Rank 0] step:4161/10000 train_time:189143ms step_avg:45.46ms
+[2025-09-06 03:53:54] [Rank 0] step:4181/10000 train_time:189883ms step_avg:45.42ms
+[2025-09-06 03:53:55] [Rank 0] step:4201/10000 train_time:190624ms step_avg:45.38ms
+[2025-09-06 03:53:55] [Rank 0] step:4221/10000 train_time:191486ms step_avg:45.37ms
+[2025-09-06 03:53:56] [Rank 0] step:4241/10000 train_time:192226ms step_avg:45.33ms
+[2025-09-06 03:53:57] [Rank 0] step:4261/10000 train_time:192965ms step_avg:45.29ms
+[2025-09-06 03:53:58] [Rank 0] step:4281/10000 train_time:193847ms step_avg:45.28ms
+[2025-09-06 03:53:59] [Rank 0] step:4301/10000 train_time:194585ms step_avg:45.24ms
+[2025-09-06 03:53:59] [Rank 0] step:4321/10000 train_time:195325ms step_avg:45.20ms
+[2025-09-06 03:54:00] [Rank 0] step:4341/10000 train_time:196065ms step_avg:45.17ms
+[2025-09-06 03:54:01] [Rank 0] step:4361/10000 train_time:196803ms step_avg:45.13ms
+[2025-09-06 03:54:02] [Rank 0] step:4381/10000 train_time:197543ms step_avg:45.09ms
+[2025-09-06 03:54:02] [Rank 0] step:4401/10000 train_time:198284ms step_avg:45.05ms
+[2025-09-06 03:54:03] [Rank 0] step:4421/10000 train_time:199026ms step_avg:45.02ms
+[2025-09-06 03:54:04] [Rank 0] step:4441/10000 train_time:199766ms step_avg:44.98ms
+[2025-09-06 03:54:04] [Rank 0] step:4461/10000 train_time:200505ms step_avg:44.95ms
+[2025-09-06 03:54:05] [Rank 0] step:4481/10000 train_time:201244ms step_avg:44.91ms
+[2025-09-06 03:54:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:54:06] [Rank 0] PRINT: step:4500/10000 train_loss:2.8593 val_loss:2.8167 train_time:202064ms step_avg:44.90ms
+[2025-09-06 03:54:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:54:07] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:55:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:55:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:55:28] [Rank 0] Total Loss: 5.1472
+[2025-09-06 03:55:28] [Rank 0] Total FTA (Unweighted): 0.1738
+[2025-09-06 03:55:28] [Rank 0] Total FTA (Weighted): 0.1737
+[2025-09-06 03:55:28] [Rank 0] Group 0 Loss: 3.2590
+[2025-09-06 03:55:28] [Rank 0] Group 1 Loss: 3.3298
+[2025-09-06 03:55:28] [Rank 0] Group 2 Loss: 3.4553
+[2025-09-06 03:55:28] [Rank 0] Group 3 Loss: 3.9830
+[2025-09-06 03:55:28] [Rank 0] Group 4 Loss: 4.7160
+[2025-09-06 03:55:28] [Rank 0] Group 5 Loss: 5.2134
+[2025-09-06 03:55:28] [Rank 0] Group 6 Loss: 5.4831
+[2025-09-06 03:55:28] [Rank 0] Group 7 Loss: 5.5632
+[2025-09-06 03:55:28] [Rank 0] Group 8 Loss: 5.7944
+[2025-09-06 03:55:28] [Rank 0] Group 9 Loss: 5.9556
+[2025-09-06 03:55:28] [Rank 0] Group 10 Loss: 5.9524
+[2025-09-06 03:55:28] [Rank 0] Group 11 Loss: 6.0153
+[2025-09-06 03:55:28] [Rank 0] Group 12 Loss: 5.8803
+[2025-09-06 03:55:28] [Rank 0] Group 13 Loss: 5.8872
+[2025-09-06 03:55:28] [Rank 0] Group 14 Loss: 5.9623
+[2025-09-06 03:55:28] [Rank 0] Group 15 Loss: 5.9044
+[2025-09-06 03:55:28] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 03:55:28] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:55:28] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:55:28] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:55:28] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 03:55:28] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:55:28] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 03:55:28] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:55:28] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 03:55:28] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:55:28] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:55:28] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 03:55:28] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:55:28] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 03:55:29] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 03:55:29] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 03:55:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:55:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:55:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:55:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:55:30] [Rank 0] step:4501/10000 train_time:202075ms step_avg:44.90ms
+[2025-09-06 03:55:31] [Rank 0] step:4521/10000 train_time:202742ms step_avg:44.84ms
+[2025-09-06 03:55:31] [Rank 0] step:4541/10000 train_time:203482ms step_avg:44.81ms
+[2025-09-06 03:55:32] [Rank 0] step:4561/10000 train_time:204222ms step_avg:44.78ms
+[2025-09-06 03:55:33] [Rank 0] step:4581/10000 train_time:204962ms step_avg:44.74ms
+[2025-09-06 03:55:34] [Rank 0] step:4601/10000 train_time:205702ms step_avg:44.71ms
+[2025-09-06 03:55:34] [Rank 0] step:4621/10000 train_time:206443ms step_avg:44.67ms
+[2025-09-06 03:55:35] [Rank 0] step:4641/10000 train_time:207183ms step_avg:44.64ms
+[2025-09-06 03:55:36] [Rank 0] step:4661/10000 train_time:207923ms step_avg:44.61ms
+[2025-09-06 03:55:37] [Rank 0] step:4681/10000 train_time:208663ms step_avg:44.58ms
+[2025-09-06 03:55:37] [Rank 0] step:4701/10000 train_time:209403ms step_avg:44.54ms
+[2025-09-06 03:55:38] [Rank 0] step:4721/10000 train_time:210142ms step_avg:44.51ms
+[2025-09-06 03:55:39] [Rank 0] step:4741/10000 train_time:210883ms step_avg:44.48ms
+[2025-09-06 03:55:40] [Rank 0] step:4761/10000 train_time:211622ms step_avg:44.45ms
+[2025-09-06 03:55:40] [Rank 0] step:4781/10000 train_time:212363ms step_avg:44.42ms
+[2025-09-06 03:55:41] [Rank 0] step:4801/10000 train_time:213102ms step_avg:44.39ms
+[2025-09-06 03:55:42] [Rank 0] step:4821/10000 train_time:213842ms step_avg:44.36ms
+[2025-09-06 03:55:43] [Rank 0] step:4841/10000 train_time:214685ms step_avg:44.35ms
+[2025-09-06 03:55:43] [Rank 0] step:4861/10000 train_time:215425ms step_avg:44.32ms
+[2025-09-06 03:55:44] [Rank 0] step:4881/10000 train_time:216165ms step_avg:44.29ms
+[2025-09-06 03:55:45] [Rank 0] step:4901/10000 train_time:216905ms step_avg:44.26ms
+[2025-09-06 03:55:46] [Rank 0] step:4921/10000 train_time:217644ms step_avg:44.23ms
+[2025-09-06 03:55:46] [Rank 0] step:4941/10000 train_time:218385ms step_avg:44.20ms
+[2025-09-06 03:55:47] [Rank 0] step:4961/10000 train_time:219125ms step_avg:44.17ms
+[2025-09-06 03:55:48] [Rank 0] step:4981/10000 train_time:219866ms step_avg:44.14ms
+[2025-09-06 03:55:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:55:49] [Rank 0] PRINT: step:5000/10000 train_loss:2.7965 val_loss:2.7614 train_time:220687ms step_avg:44.14ms
+[2025-09-06 03:55:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:55:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:57:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:57:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:57:12] [Rank 0] Total Loss: 5.1148
+[2025-09-06 03:57:12] [Rank 0] Total FTA (Unweighted): 0.1738
+[2025-09-06 03:57:12] [Rank 0] Total FTA (Weighted): 0.1737
+[2025-09-06 03:57:12] [Rank 0] Group 0 Loss: 3.3265
+[2025-09-06 03:57:12] [Rank 0] Group 1 Loss: 3.3423
+[2025-09-06 03:57:12] [Rank 0] Group 2 Loss: 3.4841
+[2025-09-06 03:57:12] [Rank 0] Group 3 Loss: 3.9766
+[2025-09-06 03:57:12] [Rank 0] Group 4 Loss: 4.6505
+[2025-09-06 03:57:12] [Rank 0] Group 5 Loss: 5.1489
+[2025-09-06 03:57:12] [Rank 0] Group 6 Loss: 5.4353
+[2025-09-06 03:57:12] [Rank 0] Group 7 Loss: 5.5180
+[2025-09-06 03:57:12] [Rank 0] Group 8 Loss: 5.7344
+[2025-09-06 03:57:12] [Rank 0] Group 9 Loss: 5.9027
+[2025-09-06 03:57:12] [Rank 0] Group 10 Loss: 5.8943
+[2025-09-06 03:57:12] [Rank 0] Group 11 Loss: 5.9636
+[2025-09-06 03:57:12] [Rank 0] Group 12 Loss: 5.8361
+[2025-09-06 03:57:12] [Rank 0] Group 13 Loss: 5.8537
+[2025-09-06 03:57:12] [Rank 0] Group 14 Loss: 5.9214
+[2025-09-06 03:57:12] [Rank 0] Group 15 Loss: 5.8484
+[2025-09-06 03:57:12] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 03:57:12] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:57:12] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:57:12] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:57:12] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 03:57:12] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:57:12] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 03:57:12] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:57:12] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 03:57:12] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:57:12] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:57:12] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:57:12] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 03:57:12] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 03:57:12] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 03:57:12] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 03:57:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:57:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:57:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:57:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:57:13] [Rank 0] step:5001/10000 train_time:220697ms step_avg:44.13ms
+[2025-09-06 03:57:14] [Rank 0] step:5021/10000 train_time:221368ms step_avg:44.09ms
+[2025-09-06 03:57:15] [Rank 0] step:5041/10000 train_time:222108ms step_avg:44.06ms
+[2025-09-06 03:57:15] [Rank 0] step:5061/10000 train_time:222848ms step_avg:44.03ms
+[2025-09-06 03:57:16] [Rank 0] step:5081/10000 train_time:223595ms step_avg:44.01ms
+[2025-09-06 03:57:17] [Rank 0] step:5101/10000 train_time:224335ms step_avg:43.98ms
+[2025-09-06 03:57:18] [Rank 0] step:5121/10000 train_time:225074ms step_avg:43.95ms
+[2025-09-06 03:57:18] [Rank 0] step:5141/10000 train_time:225813ms step_avg:43.92ms
+[2025-09-06 03:57:19] [Rank 0] step:5161/10000 train_time:226552ms step_avg:43.90ms
+[2025-09-06 03:57:20] [Rank 0] step:5181/10000 train_time:227292ms step_avg:43.87ms
+[2025-09-06 03:57:21] [Rank 0] step:5201/10000 train_time:228032ms step_avg:43.84ms
+[2025-09-06 03:57:21] [Rank 0] step:5221/10000 train_time:228772ms step_avg:43.82ms
+[2025-09-06 03:57:22] [Rank 0] step:5241/10000 train_time:229512ms step_avg:43.79ms
+[2025-09-06 03:57:23] [Rank 0] step:5261/10000 train_time:230252ms step_avg:43.77ms
+[2025-09-06 03:57:24] [Rank 0] step:5281/10000 train_time:230992ms step_avg:43.74ms
+[2025-09-06 03:57:24] [Rank 0] step:5301/10000 train_time:231732ms step_avg:43.71ms
+[2025-09-06 03:57:25] [Rank 0] step:5321/10000 train_time:232472ms step_avg:43.69ms
+[2025-09-06 03:57:26] [Rank 0] step:5341/10000 train_time:233212ms step_avg:43.66ms
+[2025-09-06 03:57:27] [Rank 0] step:5361/10000 train_time:233952ms step_avg:43.64ms
+[2025-09-06 03:57:27] [Rank 0] step:5381/10000 train_time:234691ms step_avg:43.61ms
+[2025-09-06 03:57:28] [Rank 0] step:5401/10000 train_time:235431ms step_avg:43.59ms
+[2025-09-06 03:57:29] [Rank 0] step:5421/10000 train_time:236171ms step_avg:43.57ms
+[2025-09-06 03:57:29] [Rank 0] step:5441/10000 train_time:236912ms step_avg:43.54ms
+[2025-09-06 03:57:30] [Rank 0] step:5461/10000 train_time:237652ms step_avg:43.52ms
+[2025-09-06 03:57:31] [Rank 0] step:5481/10000 train_time:238391ms step_avg:43.49ms
+[2025-09-06 03:57:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:57:32] [Rank 0] PRINT: step:5500/10000 train_loss:2.7447 val_loss:2.7160 train_time:239212ms step_avg:43.49ms
+[2025-09-06 03:57:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:57:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 03:58:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 03:58:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 03:58:54] [Rank 0] Total Loss: 5.0497
+[2025-09-06 03:58:54] [Rank 0] Total FTA (Unweighted): 0.1856
+[2025-09-06 03:58:54] [Rank 0] Total FTA (Weighted): 0.1856
+[2025-09-06 03:58:54] [Rank 0] Group 0 Loss: 3.2313
+[2025-09-06 03:58:54] [Rank 0] Group 1 Loss: 3.2968
+[2025-09-06 03:58:54] [Rank 0] Group 2 Loss: 3.3690
+[2025-09-06 03:58:54] [Rank 0] Group 3 Loss: 3.9059
+[2025-09-06 03:58:54] [Rank 0] Group 4 Loss: 4.5659
+[2025-09-06 03:58:54] [Rank 0] Group 5 Loss: 5.0887
+[2025-09-06 03:58:54] [Rank 0] Group 6 Loss: 5.3593
+[2025-09-06 03:58:54] [Rank 0] Group 7 Loss: 5.4419
+[2025-09-06 03:58:54] [Rank 0] Group 8 Loss: 5.6980
+[2025-09-06 03:58:54] [Rank 0] Group 9 Loss: 5.8406
+[2025-09-06 03:58:54] [Rank 0] Group 10 Loss: 5.8572
+[2025-09-06 03:58:54] [Rank 0] Group 11 Loss: 5.9233
+[2025-09-06 03:58:54] [Rank 0] Group 12 Loss: 5.7630
+[2025-09-06 03:58:54] [Rank 0] Group 13 Loss: 5.8034
+[2025-09-06 03:58:54] [Rank 0] Group 14 Loss: 5.8590
+[2025-09-06 03:58:54] [Rank 0] Group 15 Loss: 5.7925
+[2025-09-06 03:58:54] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 03:58:54] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 03:58:54] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 03:58:54] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 03:58:54] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 03:58:54] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 03:58:54] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 03:58:54] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 03:58:54] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 03:58:54] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 03:58:54] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 03:58:54] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 03:58:54] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 03:58:54] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 03:58:54] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 03:58:54] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 03:58:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 03:58:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 03:58:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 03:58:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 03:58:56] [Rank 0] step:5501/10000 train_time:239223ms step_avg:43.49ms
+[2025-09-06 03:58:57] [Rank 0] step:5521/10000 train_time:239888ms step_avg:43.45ms
+[2025-09-06 03:58:57] [Rank 0] step:5541/10000 train_time:240627ms step_avg:43.43ms
+[2025-09-06 03:58:58] [Rank 0] step:5561/10000 train_time:241367ms step_avg:43.40ms
+[2025-09-06 03:58:59] [Rank 0] step:5581/10000 train_time:242107ms step_avg:43.38ms
+[2025-09-06 03:58:59] [Rank 0] step:5601/10000 train_time:242847ms step_avg:43.36ms
+[2025-09-06 03:59:00] [Rank 0] step:5621/10000 train_time:243587ms step_avg:43.34ms
+[2025-09-06 03:59:02] [Rank 0] step:5641/10000 train_time:244950ms step_avg:43.42ms
+[2025-09-06 03:59:02] [Rank 0] step:5661/10000 train_time:245695ms step_avg:43.40ms
+[2025-09-06 03:59:03] [Rank 0] step:5681/10000 train_time:246436ms step_avg:43.38ms
+[2025-09-06 03:59:04] [Rank 0] step:5701/10000 train_time:247177ms step_avg:43.36ms
+[2025-09-06 03:59:05] [Rank 0] step:5721/10000 train_time:247917ms step_avg:43.33ms
+[2025-09-06 03:59:05] [Rank 0] step:5741/10000 train_time:248656ms step_avg:43.31ms
+[2025-09-06 03:59:06] [Rank 0] step:5761/10000 train_time:249396ms step_avg:43.29ms
+[2025-09-06 03:59:07] [Rank 0] step:5781/10000 train_time:250136ms step_avg:43.27ms
+[2025-09-06 03:59:08] [Rank 0] step:5801/10000 train_time:250876ms step_avg:43.25ms
+[2025-09-06 03:59:08] [Rank 0] step:5821/10000 train_time:251616ms step_avg:43.23ms
+[2025-09-06 03:59:09] [Rank 0] step:5841/10000 train_time:252355ms step_avg:43.20ms
+[2025-09-06 03:59:10] [Rank 0] step:5861/10000 train_time:253095ms step_avg:43.18ms
+[2025-09-06 03:59:10] [Rank 0] step:5881/10000 train_time:253835ms step_avg:43.16ms
+[2025-09-06 03:59:11] [Rank 0] step:5901/10000 train_time:254709ms step_avg:43.16ms
+[2025-09-06 03:59:12] [Rank 0] step:5921/10000 train_time:255448ms step_avg:43.14ms
+[2025-09-06 03:59:13] [Rank 0] step:5941/10000 train_time:256188ms step_avg:43.12ms
+[2025-09-06 03:59:14] [Rank 0] step:5961/10000 train_time:257085ms step_avg:43.13ms
+[2025-09-06 03:59:14] [Rank 0] step:5981/10000 train_time:257824ms step_avg:43.11ms
+[2025-09-06 03:59:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 03:59:16] [Rank 0] PRINT: step:6000/10000 train_loss:2.7040 val_loss:2.6770 train_time:258644ms step_avg:43.11ms
+[2025-09-06 03:59:16] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 03:59:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:00:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:00:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:00:38] [Rank 0] Total Loss: 5.0242
+[2025-09-06 04:00:38] [Rank 0] Total FTA (Unweighted): 0.2000
+[2025-09-06 04:00:38] [Rank 0] Total FTA (Weighted): 0.2000
+[2025-09-06 04:00:38] [Rank 0] Group 0 Loss: 3.2418
+[2025-09-06 04:00:38] [Rank 0] Group 1 Loss: 3.3606
+[2025-09-06 04:00:38] [Rank 0] Group 2 Loss: 3.4271
+[2025-09-06 04:00:38] [Rank 0] Group 3 Loss: 3.8863
+[2025-09-06 04:00:38] [Rank 0] Group 4 Loss: 4.5299
+[2025-09-06 04:00:38] [Rank 0] Group 5 Loss: 5.0010
+[2025-09-06 04:00:38] [Rank 0] Group 6 Loss: 5.2821
+[2025-09-06 04:00:38] [Rank 0] Group 7 Loss: 5.4105
+[2025-09-06 04:00:38] [Rank 0] Group 8 Loss: 5.6518
+[2025-09-06 04:00:38] [Rank 0] Group 9 Loss: 5.8027
+[2025-09-06 04:00:38] [Rank 0] Group 10 Loss: 5.8034
+[2025-09-06 04:00:38] [Rank 0] Group 11 Loss: 5.8540
+[2025-09-06 04:00:38] [Rank 0] Group 12 Loss: 5.7601
+[2025-09-06 04:00:38] [Rank 0] Group 13 Loss: 5.7783
+[2025-09-06 04:00:38] [Rank 0] Group 14 Loss: 5.8399
+[2025-09-06 04:00:38] [Rank 0] Group 15 Loss: 5.7576
+[2025-09-06 04:00:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 04:00:38] [Rank 0] Group 1 FTA: 0.3800
+[2025-09-06 04:00:38] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:00:38] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 04:00:38] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 04:00:38] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 04:00:38] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 04:00:38] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 04:00:38] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 04:00:38] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 04:00:38] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 04:00:38] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:00:38] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 04:00:38] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:00:38] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 04:00:38] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 04:00:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:00:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:00:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:00:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:00:39] [Rank 0] step:6001/10000 train_time:258655ms step_avg:43.10ms
+[2025-09-06 04:00:41] [Rank 0] step:6021/10000 train_time:259963ms step_avg:43.18ms
+[2025-09-06 04:00:42] [Rank 0] step:6041/10000 train_time:260702ms step_avg:43.16ms
+[2025-09-06 04:00:42] [Rank 0] step:6061/10000 train_time:261442ms step_avg:43.14ms
+[2025-09-06 04:00:43] [Rank 0] step:6081/10000 train_time:262182ms step_avg:43.11ms
+[2025-09-06 04:00:44] [Rank 0] step:6101/10000 train_time:262922ms step_avg:43.09ms
+[2025-09-06 04:00:44] [Rank 0] step:6121/10000 train_time:263662ms step_avg:43.08ms
+[2025-09-06 04:00:45] [Rank 0] step:6141/10000 train_time:264402ms step_avg:43.06ms
+[2025-09-06 04:00:46] [Rank 0] step:6161/10000 train_time:265143ms step_avg:43.04ms
+[2025-09-06 04:00:47] [Rank 0] step:6181/10000 train_time:265884ms step_avg:43.02ms
+[2025-09-06 04:00:47] [Rank 0] step:6201/10000 train_time:266624ms step_avg:43.00ms
+[2025-09-06 04:00:48] [Rank 0] step:6221/10000 train_time:267364ms step_avg:42.98ms
+[2025-09-06 04:00:49] [Rank 0] step:6241/10000 train_time:268104ms step_avg:42.96ms
+[2025-09-06 04:00:50] [Rank 0] step:6261/10000 train_time:268844ms step_avg:42.94ms
+[2025-09-06 04:00:50] [Rank 0] step:6281/10000 train_time:269584ms step_avg:42.92ms
+[2025-09-06 04:00:51] [Rank 0] step:6301/10000 train_time:270325ms step_avg:42.90ms
+[2025-09-06 04:00:52] [Rank 0] step:6321/10000 train_time:271064ms step_avg:42.88ms
+[2025-09-06 04:00:53] [Rank 0] step:6341/10000 train_time:271804ms step_avg:42.86ms
+[2025-09-06 04:00:53] [Rank 0] step:6361/10000 train_time:272545ms step_avg:42.85ms
+[2025-09-06 04:00:54] [Rank 0] step:6381/10000 train_time:273286ms step_avg:42.83ms
+[2025-09-06 04:00:55] [Rank 0] step:6401/10000 train_time:274026ms step_avg:42.81ms
+[2025-09-06 04:00:56] [Rank 0] step:6421/10000 train_time:274766ms step_avg:42.79ms
+[2025-09-06 04:00:56] [Rank 0] step:6441/10000 train_time:275506ms step_avg:42.77ms
+[2025-09-06 04:00:57] [Rank 0] step:6461/10000 train_time:276246ms step_avg:42.76ms
+[2025-09-06 04:00:58] [Rank 0] step:6481/10000 train_time:276987ms step_avg:42.74ms
+[2025-09-06 04:00:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:00:59] [Rank 0] PRINT: step:6500/10000 train_loss:2.6704 val_loss:2.6453 train_time:277807ms step_avg:42.74ms
+[2025-09-06 04:00:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:00:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:02:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:02:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:02:23] [Rank 0] Total Loss: 5.0103
+[2025-09-06 04:02:23] [Rank 0] Total FTA (Unweighted): 0.2006
+[2025-09-06 04:02:23] [Rank 0] Total FTA (Weighted): 0.2006
+[2025-09-06 04:02:23] [Rank 0] Group 0 Loss: 3.2818
+[2025-09-06 04:02:23] [Rank 0] Group 1 Loss: 3.3845
+[2025-09-06 04:02:23] [Rank 0] Group 2 Loss: 3.4424
+[2025-09-06 04:02:23] [Rank 0] Group 3 Loss: 3.8655
+[2025-09-06 04:02:23] [Rank 0] Group 4 Loss: 4.5127
+[2025-09-06 04:02:23] [Rank 0] Group 5 Loss: 4.9786
+[2025-09-06 04:02:23] [Rank 0] Group 6 Loss: 5.2715
+[2025-09-06 04:02:23] [Rank 0] Group 7 Loss: 5.3819
+[2025-09-06 04:02:23] [Rank 0] Group 8 Loss: 5.6229
+[2025-09-06 04:02:23] [Rank 0] Group 9 Loss: 5.7846
+[2025-09-06 04:02:23] [Rank 0] Group 10 Loss: 5.7777
+[2025-09-06 04:02:23] [Rank 0] Group 11 Loss: 5.8364
+[2025-09-06 04:02:23] [Rank 0] Group 12 Loss: 5.7212
+[2025-09-06 04:02:23] [Rank 0] Group 13 Loss: 5.7488
+[2025-09-06 04:02:23] [Rank 0] Group 14 Loss: 5.8184
+[2025-09-06 04:02:23] [Rank 0] Group 15 Loss: 5.7355
+[2025-09-06 04:02:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 04:02:23] [Rank 0] Group 1 FTA: 0.3800
+[2025-09-06 04:02:23] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:02:23] [Rank 0] Group 3 FTA: 0.1200
+[2025-09-06 04:02:23] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 04:02:23] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 04:02:23] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 04:02:23] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 04:02:23] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 04:02:23] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 04:02:23] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 04:02:23] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:02:23] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 04:02:23] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:02:23] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 04:02:23] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 04:02:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:02:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:02:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:02:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:02:24] [Rank 0] step:6501/10000 train_time:277817ms step_avg:42.73ms
+[2025-09-06 04:02:25] [Rank 0] step:6521/10000 train_time:278487ms step_avg:42.71ms
+[2025-09-06 04:02:26] [Rank 0] step:6541/10000 train_time:279226ms step_avg:42.69ms
+[2025-09-06 04:02:26] [Rank 0] step:6561/10000 train_time:279965ms step_avg:42.67ms
+[2025-09-06 04:02:27] [Rank 0] step:6581/10000 train_time:280704ms step_avg:42.65ms
+[2025-09-06 04:02:28] [Rank 0] step:6601/10000 train_time:281444ms step_avg:42.64ms
+[2025-09-06 04:02:29] [Rank 0] step:6621/10000 train_time:282184ms step_avg:42.62ms
+[2025-09-06 04:02:29] [Rank 0] step:6641/10000 train_time:282924ms step_avg:42.60ms
+[2025-09-06 04:02:30] [Rank 0] step:6661/10000 train_time:283664ms step_avg:42.59ms
+[2025-09-06 04:02:31] [Rank 0] step:6681/10000 train_time:284404ms step_avg:42.57ms
+[2025-09-06 04:02:32] [Rank 0] step:6701/10000 train_time:285142ms step_avg:42.55ms
+[2025-09-06 04:02:32] [Rank 0] step:6721/10000 train_time:285881ms step_avg:42.54ms
+[2025-09-06 04:02:33] [Rank 0] step:6741/10000 train_time:286621ms step_avg:42.52ms
+[2025-09-06 04:02:34] [Rank 0] step:6761/10000 train_time:287361ms step_avg:42.50ms
+[2025-09-06 04:02:35] [Rank 0] step:6781/10000 train_time:288101ms step_avg:42.49ms
+[2025-09-06 04:02:35] [Rank 0] step:6801/10000 train_time:288842ms step_avg:42.47ms
+[2025-09-06 04:02:36] [Rank 0] step:6821/10000 train_time:289583ms step_avg:42.45ms
+[2025-09-06 04:02:37] [Rank 0] step:6841/10000 train_time:290520ms step_avg:42.47ms
+[2025-09-06 04:02:38] [Rank 0] step:6861/10000 train_time:291260ms step_avg:42.45ms
+[2025-09-06 04:02:38] [Rank 0] step:6881/10000 train_time:292000ms step_avg:42.44ms
+[2025-09-06 04:02:39] [Rank 0] step:6901/10000 train_time:292740ms step_avg:42.42ms
+[2025-09-06 04:02:40] [Rank 0] step:6921/10000 train_time:293479ms step_avg:42.40ms
+[2025-09-06 04:02:41] [Rank 0] step:6941/10000 train_time:294220ms step_avg:42.39ms
+[2025-09-06 04:02:41] [Rank 0] step:6961/10000 train_time:294959ms step_avg:42.37ms
+[2025-09-06 04:02:42] [Rank 0] step:6981/10000 train_time:295700ms step_avg:42.36ms
+[2025-09-06 04:02:43] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:02:43] [Rank 0] PRINT: step:7000/10000 train_loss:2.6400 val_loss:2.6186 train_time:296520ms step_avg:42.36ms
+[2025-09-06 04:02:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:02:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:04:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:04:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:04:06] [Rank 0] Total Loss: 4.9835
+[2025-09-06 04:04:06] [Rank 0] Total FTA (Unweighted): 0.2187
+[2025-09-06 04:04:06] [Rank 0] Total FTA (Weighted): 0.2188
+[2025-09-06 04:04:06] [Rank 0] Group 0 Loss: 3.3000
+[2025-09-06 04:04:06] [Rank 0] Group 1 Loss: 3.3689
+[2025-09-06 04:04:06] [Rank 0] Group 2 Loss: 3.4177
+[2025-09-06 04:04:06] [Rank 0] Group 3 Loss: 3.8611
+[2025-09-06 04:04:06] [Rank 0] Group 4 Loss: 4.4630
+[2025-09-06 04:04:06] [Rank 0] Group 5 Loss: 4.9291
+[2025-09-06 04:04:06] [Rank 0] Group 6 Loss: 5.2368
+[2025-09-06 04:04:06] [Rank 0] Group 7 Loss: 5.3501
+[2025-09-06 04:04:06] [Rank 0] Group 8 Loss: 5.5962
+[2025-09-06 04:04:06] [Rank 0] Group 9 Loss: 5.7462
+[2025-09-06 04:04:06] [Rank 0] Group 10 Loss: 5.7394
+[2025-09-06 04:04:06] [Rank 0] Group 11 Loss: 5.8105
+[2025-09-06 04:04:06] [Rank 0] Group 12 Loss: 5.7089
+[2025-09-06 04:04:06] [Rank 0] Group 13 Loss: 5.7238
+[2025-09-06 04:04:06] [Rank 0] Group 14 Loss: 5.7721
+[2025-09-06 04:04:06] [Rank 0] Group 15 Loss: 5.7117
+[2025-09-06 04:04:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 04:04:06] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 04:04:06] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:04:06] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 04:04:06] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 04:04:06] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 04:04:06] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 04:04:06] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 04:04:06] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 04:04:06] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 04:04:06] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 04:04:06] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:04:06] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 04:04:06] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:04:06] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 04:04:06] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 04:04:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:04:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:04:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:04:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:04:08] [Rank 0] step:7001/10000 train_time:296531ms step_avg:42.36ms
+[2025-09-06 04:04:09] [Rank 0] step:7021/10000 train_time:297194ms step_avg:42.33ms
+[2025-09-06 04:04:09] [Rank 0] step:7041/10000 train_time:297933ms step_avg:42.31ms
+[2025-09-06 04:04:10] [Rank 0] step:7061/10000 train_time:298673ms step_avg:42.30ms
+[2025-09-06 04:04:11] [Rank 0] step:7081/10000 train_time:299411ms step_avg:42.28ms
+[2025-09-06 04:04:11] [Rank 0] step:7101/10000 train_time:300150ms step_avg:42.27ms
+[2025-09-06 04:04:12] [Rank 0] step:7121/10000 train_time:300890ms step_avg:42.25ms
04:04:12] [Rank 0] step:7121/10000 train_time:300890ms step_avg:42.25ms +[2025-09-06 04:04:13] [Rank 0] step:7141/10000 train_time:301630ms step_avg:42.24ms +[2025-09-06 04:04:13] [Rank 0] step:7141/10000 train_time:301630ms step_avg:42.24ms +[2025-09-06 04:04:14] [Rank 0] step:7161/10000 train_time:302370ms step_avg:42.22ms +[2025-09-06 04:04:14] [Rank 0] step:7161/10000 train_time:302370ms step_avg:42.22ms +[2025-09-06 04:04:14] [Rank 0] step:7181/10000 train_time:303110ms step_avg:42.21ms +[2025-09-06 04:04:14] [Rank 0] step:7181/10000 train_time:303110ms step_avg:42.21ms +[2025-09-06 04:04:15] [Rank 0] step:7201/10000 train_time:303850ms step_avg:42.20ms +[2025-09-06 04:04:15] [Rank 0] step:7201/10000 train_time:303850ms step_avg:42.20ms +[2025-09-06 04:04:16] [Rank 0] step:7221/10000 train_time:304590ms step_avg:42.18ms +[2025-09-06 04:04:16] [Rank 0] step:7221/10000 train_time:304590ms step_avg:42.18ms +[2025-09-06 04:04:17] [Rank 0] step:7241/10000 train_time:305331ms step_avg:42.17ms +[2025-09-06 04:04:17] [Rank 0] step:7241/10000 train_time:305331ms step_avg:42.17ms +[2025-09-06 04:04:17] [Rank 0] step:7261/10000 train_time:306071ms step_avg:42.15ms +[2025-09-06 04:04:17] [Rank 0] step:7261/10000 train_time:306071ms step_avg:42.15ms +[2025-09-06 04:04:18] [Rank 0] step:7281/10000 train_time:306811ms step_avg:42.14ms +[2025-09-06 04:04:18] [Rank 0] step:7281/10000 train_time:306811ms step_avg:42.14ms +[2025-09-06 04:04:19] [Rank 0] step:7301/10000 train_time:307551ms step_avg:42.12ms +[2025-09-06 04:04:19] [Rank 0] step:7301/10000 train_time:307551ms step_avg:42.12ms +[2025-09-06 04:04:20] [Rank 0] step:7321/10000 train_time:308292ms step_avg:42.11ms +[2025-09-06 04:04:20] [Rank 0] step:7321/10000 train_time:308292ms step_avg:42.11ms +[2025-09-06 04:04:20] [Rank 0] step:7341/10000 train_time:309033ms step_avg:42.10ms +[2025-09-06 04:04:20] [Rank 0] step:7341/10000 train_time:309033ms step_avg:42.10ms +[2025-09-06 04:04:21] [Rank 0] step:7361/10000 train_time:309774ms step_avg:42.08ms +[2025-09-06 04:04:21] [Rank 0] step:7361/10000 train_time:309774ms step_avg:42.08ms +[2025-09-06 04:04:22] [Rank 0] step:7381/10000 train_time:310518ms step_avg:42.07ms +[2025-09-06 04:04:22] [Rank 0] step:7381/10000 train_time:310518ms step_avg:42.07ms +[2025-09-06 04:04:23] [Rank 0] step:7401/10000 train_time:311258ms step_avg:42.06ms +[2025-09-06 04:04:23] [Rank 0] step:7401/10000 train_time:311258ms step_avg:42.06ms +[2025-09-06 04:04:23] [Rank 0] step:7421/10000 train_time:311998ms step_avg:42.04ms +[2025-09-06 04:04:23] [Rank 0] step:7421/10000 train_time:311998ms step_avg:42.04ms +[2025-09-06 04:04:24] [Rank 0] step:7441/10000 train_time:312739ms step_avg:42.03ms +[2025-09-06 04:04:24] [Rank 0] step:7441/10000 train_time:312739ms step_avg:42.03ms +[2025-09-06 04:04:25] [Rank 0] step:7461/10000 train_time:313479ms step_avg:42.02ms +[2025-09-06 04:04:25] [Rank 0] step:7461/10000 train_time:313479ms step_avg:42.02ms +[2025-09-06 04:04:26] [Rank 0] step:7481/10000 train_time:314219ms step_avg:42.00ms +[2025-09-06 04:04:26] [Rank 0] step:7481/10000 train_time:314219ms step_avg:42.00ms +[2025-09-06 04:04:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:04:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 04:04:27] [Rank 0] PRINT: step:7500/10000 train_loss:2.6147 val_loss:2.5956 train_time:315040ms step_avg:42.01ms
+[2025-09-06 04:04:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:04:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:05:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:05:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:05:49] [Rank 0] Total Loss: 4.9835
+[2025-09-06 04:05:49] [Rank 0] Total FTA (Unweighted): 0.2213
+[2025-09-06 04:05:49] [Rank 0] Total FTA (Weighted): 0.2213
+[2025-09-06 04:05:49] [Rank 0] Group 0 Loss: 3.3088
+[2025-09-06 04:05:49] [Rank 0] Group 1 Loss: 3.3860
+[2025-09-06 04:05:49] [Rank 0] Group 2 Loss: 3.4122
+[2025-09-06 04:05:49] [Rank 0] Group 3 Loss: 3.8623
+[2025-09-06 04:05:49] [Rank 0] Group 4 Loss: 4.4604
+[2025-09-06 04:05:49] [Rank 0] Group 5 Loss: 4.9508
+[2025-09-06 04:05:49] [Rank 0] Group 6 Loss: 5.2395
+[2025-09-06 04:05:49] [Rank 0] Group 7 Loss: 5.3476
+[2025-09-06 04:05:49] [Rank 0] Group 8 Loss: 5.6017
+[2025-09-06 04:05:49] [Rank 0] Group 9 Loss: 5.7310
+[2025-09-06 04:05:49] [Rank 0] Group 10 Loss: 5.7432
+[2025-09-06 04:05:49] [Rank 0] Group 11 Loss: 5.7959
+[2025-09-06 04:05:49] [Rank 0] Group 12 Loss: 5.6942
+[2025-09-06 04:05:49] [Rank 0] Group 13 Loss: 5.7159
+[2025-09-06 04:05:49] [Rank 0] Group 14 Loss: 5.7733
+[2025-09-06 04:05:49] [Rank 0] Group 15 Loss: 5.7127
+[2025-09-06 04:05:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 04:05:49] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 04:05:49] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:05:49] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 04:05:49] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 04:05:49] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 04:05:49] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 04:05:49] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 04:05:49] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 04:05:49] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 04:05:49] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 04:05:49] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:05:49] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 04:05:49] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:05:49] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 04:05:49] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 04:05:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:05:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:05:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:05:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:05:51] [Rank 0] step:7501/10000 train_time:315051ms step_avg:42.00ms
+[2025-09-06 04:05:51] [Rank 0] step:7521/10000 train_time:315731ms step_avg:41.98ms
+[2025-09-06 04:05:52] [Rank 0] step:7541/10000 train_time:316471ms step_avg:41.97ms
+[2025-09-06 04:05:53] [Rank 0] step:7561/10000 train_time:317211ms step_avg:41.95ms
+[2025-09-06 04:05:54] [Rank 0] step:7581/10000 train_time:317950ms step_avg:41.94ms
+[2025-09-06 04:05:54] [Rank 0] step:7601/10000 train_time:318691ms step_avg:41.93ms
+[2025-09-06 04:05:55] [Rank 0] step:7621/10000 train_time:319431ms step_avg:41.91ms
+[2025-09-06 04:05:56] [Rank 0] step:7641/10000 train_time:320395ms step_avg:41.93ms
+[2025-09-06 04:05:57] [Rank 0] step:7661/10000 train_time:321105ms step_avg:41.91ms
+[2025-09-06 04:05:57] [Rank 0] step:7681/10000 train_time:321845ms step_avg:41.90ms
+[2025-09-06 04:05:58] [Rank 0] step:7701/10000 train_time:322585ms step_avg:41.89ms
+[2025-09-06 04:05:59] [Rank 0] step:7721/10000 train_time:323325ms step_avg:41.88ms
+[2025-09-06 04:06:00] [Rank 0] step:7741/10000 train_time:324065ms step_avg:41.86ms
+[2025-09-06 04:06:00] [Rank 0] step:7761/10000 train_time:324805ms step_avg:41.85ms
+[2025-09-06 04:06:01] [Rank 0] step:7781/10000 train_time:325545ms step_avg:41.84ms
+[2025-09-06 04:06:02] [Rank 0] step:7801/10000 train_time:326288ms step_avg:41.83ms
+[2025-09-06 04:06:03] [Rank 0] step:7821/10000 train_time:327028ms step_avg:41.81ms
+[2025-09-06 04:06:03] [Rank 0] step:7841/10000 train_time:327771ms step_avg:41.80ms
+[2025-09-06 04:06:04] [Rank 0] step:7861/10000 train_time:328512ms step_avg:41.79ms
+[2025-09-06 04:06:05] [Rank 0] step:7881/10000 train_time:329251ms step_avg:41.78ms
+[2025-09-06 04:06:06] [Rank 0] step:7901/10000 train_time:329991ms step_avg:41.77ms
+[2025-09-06 04:06:06] [Rank 0] step:7921/10000 train_time:330732ms step_avg:41.75ms
+[2025-09-06 04:06:07] [Rank 0] step:7941/10000 train_time:331471ms step_avg:41.74ms
+[2025-09-06 04:06:08] [Rank 0] step:7961/10000 train_time:332212ms step_avg:41.73ms
+[2025-09-06 04:06:09] [Rank 0] step:7981/10000 train_time:332952ms step_avg:41.72ms
+[2025-09-06 04:06:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:06:10] [Rank 0] PRINT: step:8000/10000 train_loss:2.5937 val_loss:2.5754 train_time:333773ms step_avg:41.72ms
+[2025-09-06 04:06:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:06:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:07:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:07:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:07:32] [Rank 0] Total Loss: 4.9670
+[2025-09-06 04:07:32] [Rank 0] Total FTA (Unweighted): 0.2219
+[2025-09-06 04:07:32] [Rank 0] Total FTA (Weighted): 0.2219
+[2025-09-06 04:07:32] [Rank 0] Group 0 Loss: 3.2929
+[2025-09-06 04:07:32] [Rank 0] Group 1 Loss: 3.3701
+[2025-09-06 04:07:32] [Rank 0] Group 2 Loss: 3.4459
+[2025-09-06 04:07:32] [Rank 0] Group 3 Loss: 3.8560
+[2025-09-06 04:07:32] [Rank 0] Group 4 Loss: 4.4255
+[2025-09-06 04:07:32] [Rank 0] Group 5 Loss: 4.9154
+[2025-09-06 04:07:32] [Rank 0] Group 6 Loss: 5.2083
+[2025-09-06 04:07:32] [Rank 0] Group 7 Loss: 5.3171
+[2025-09-06 04:07:32] [Rank 0] Group 8 Loss: 5.5779
+[2025-09-06 04:07:32] [Rank 0] Group 9 Loss: 5.7144
+[2025-09-06 04:07:32] [Rank 0] Group 10 Loss: 5.7231
+[2025-09-06 04:07:32] [Rank 0] Group 11 Loss: 5.7820
+[2025-09-06 04:07:32] [Rank 0] Group 12 Loss: 5.6785
+[2025-09-06 04:07:32] [Rank 0] Group 13 Loss: 5.7087
+[2025-09-06 04:07:32] [Rank 0] Group 14 Loss: 5.7625
+[2025-09-06 04:07:32] [Rank 0] Group 15 Loss: 5.6935
+[2025-09-06 04:07:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 04:07:32] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 04:07:32] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:07:32] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 04:07:32] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 04:07:32] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 04:07:32] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 04:07:32] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 04:07:32] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 04:07:32] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 04:07:32] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 04:07:32] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:07:32] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 04:07:32] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:07:32] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 04:07:32] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 04:07:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:07:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:07:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:07:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:07:34] [Rank 0] step:8001/10000 train_time:333783ms step_avg:41.72ms
+[2025-09-06 04:07:35] [Rank 0] step:8021/10000 train_time:335074ms step_avg:41.77ms
+[2025-09-06 04:07:36] [Rank 0] step:8041/10000 train_time:335825ms step_avg:41.76ms
+[2025-09-06 04:07:36] [Rank 0] step:8061/10000 train_time:336564ms step_avg:41.75ms
+[2025-09-06 04:07:37] [Rank 0] step:8081/10000 train_time:337446ms step_avg:41.76ms
+[2025-09-06 04:07:38] [Rank 0] step:8101/10000 train_time:338185ms step_avg:41.75ms
+[2025-09-06 04:07:39] [Rank 0] step:8121/10000 train_time:338923ms step_avg:41.73ms
+[2025-09-06 04:07:40] [Rank 0] step:8141/10000 train_time:339662ms step_avg:41.72ms
+[2025-09-06 04:07:40] [Rank 0] step:8161/10000 train_time:340617ms step_avg:41.74ms
+[2025-09-06 04:07:41] [Rank 0] step:8181/10000 train_time:341355ms step_avg:41.73ms
+[2025-09-06 04:07:42] [Rank 0] step:8201/10000 train_time:342093ms step_avg:41.71ms
+[2025-09-06 04:07:43] [Rank 0] step:8221/10000 train_time:342832ms step_avg:41.70ms
+[2025-09-06 04:07:43] [Rank 0] step:8241/10000 train_time:343570ms step_avg:41.69ms
+[2025-09-06 04:07:44] [Rank 0] step:8261/10000 train_time:344308ms step_avg:41.68ms
+[2025-09-06 04:07:45] [Rank 0] step:8281/10000 train_time:345050ms step_avg:41.67ms
+[2025-09-06 04:07:46] [Rank 0] step:8301/10000 train_time:345789ms step_avg:41.66ms
+[2025-09-06 04:07:46] [Rank 0] step:8321/10000 train_time:346527ms step_avg:41.64ms
+[2025-09-06 04:07:47] [Rank 0] step:8341/10000 train_time:347266ms step_avg:41.63ms
+[2025-09-06 04:07:48] [Rank 0] step:8361/10000 train_time:348005ms step_avg:41.62ms
+[2025-09-06 04:07:49] [Rank 0] step:8381/10000 train_time:348744ms step_avg:41.61ms
+[2025-09-06 04:07:49] [Rank 0] step:8401/10000 train_time:349482ms step_avg:41.60ms
+[2025-09-06 04:07:50] [Rank 0] step:8421/10000 train_time:350222ms step_avg:41.59ms
+[2025-09-06 04:07:51] [Rank 0] step:8441/10000 train_time:350960ms step_avg:41.58ms
+[2025-09-06 04:07:52] [Rank 0] step:8461/10000 train_time:351700ms step_avg:41.57ms
+[2025-09-06 04:07:52] [Rank 0] step:8481/10000 train_time:352439ms step_avg:41.56ms
+[2025-09-06 04:07:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:07:53] [Rank 0] PRINT: step:8500/10000 train_loss:2.5762 val_loss:2.5589 train_time:353258ms step_avg:41.56ms
+[2025-09-06 04:07:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:07:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:09:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:09:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:09:17] [Rank 0] Total Loss: 4.9532
+[2025-09-06 04:09:17] [Rank 0] Total FTA (Unweighted): 0.2238
+[2025-09-06 04:09:17] [Rank 0] Total FTA (Weighted): 0.2238
+[2025-09-06 04:09:17] [Rank 0] Group 0 Loss: 3.2576
+[2025-09-06 04:09:17] [Rank 0] Group 1 Loss: 3.3373
+[2025-09-06 04:09:17] [Rank 0] Group 2 Loss: 3.4127
+[2025-09-06 04:09:17] [Rank 0] Group 3 Loss: 3.8433
+[2025-09-06 04:09:17] [Rank 0] Group 4 Loss: 4.4058
+[2025-09-06 04:09:17] [Rank 0] Group 5 Loss: 4.9052
+[2025-09-06 04:09:17] [Rank 0] Group 6 Loss: 5.2060
+[2025-09-06 04:09:17] [Rank 0] Group 7 Loss: 5.2970
+[2025-09-06 04:09:17] [Rank 0] Group 8 Loss: 5.5619
+[2025-09-06 04:09:17] [Rank 0] Group 9 Loss: 5.7138
+[2025-09-06 04:09:17] [Rank 0] Group 10 Loss: 5.7215
+[2025-09-06 04:09:17] [Rank 0] Group 11 Loss: 5.7901
+[2025-09-06 04:09:17] [Rank 0] Group 12 Loss: 5.6694
+[2025-09-06 04:09:17] [Rank 0] Group 13 Loss: 5.6845
+[2025-09-06 04:09:17] [Rank 0] Group 14 Loss: 5.7588
+[2025-09-06 04:09:17] [Rank 0] Group 15 Loss: 5.6857
+[2025-09-06 04:09:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 04:09:17] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 04:09:17] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:09:17] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 04:09:17] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 04:09:17] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 04:09:17] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 04:09:17] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 04:09:17] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 04:09:17] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 04:09:17] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 04:09:17] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:09:17] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 04:09:17] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:09:17] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 04:09:17] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 04:09:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:09:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:09:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:09:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:09:19] [Rank 0] step:8501/10000 train_time:353268ms step_avg:41.56ms
+[2025-09-06 04:09:20] [Rank 0] step:8521/10000 train_time:353937ms step_avg:41.54ms
+[2025-09-06 04:09:20] [Rank 0] step:8541/10000 train_time:354676ms step_avg:41.53ms
+[2025-09-06 04:09:21] [Rank 0] step:8561/10000 train_time:355415ms step_avg:41.52ms
+[2025-09-06 04:09:22] [Rank 0] step:8581/10000 train_time:356153ms step_avg:41.50ms
+[2025-09-06 04:09:23] [Rank 0] step:8601/10000 train_time:356892ms step_avg:41.49ms
+[2025-09-06 04:09:23] [Rank 0] step:8621/10000 train_time:357630ms step_avg:41.48ms
+[2025-09-06 04:09:24] [Rank 0] step:8641/10000 train_time:358368ms step_avg:41.47ms
+[2025-09-06 04:09:25] [Rank 0] step:8661/10000 train_time:359107ms step_avg:41.46ms
+[2025-09-06 04:09:26] [Rank 0] step:8681/10000 train_time:359846ms step_avg:41.45ms
+[2025-09-06 04:09:26] [Rank 0] step:8701/10000 train_time:360584ms step_avg:41.44ms
+[2025-09-06 04:09:27] [Rank 0] step:8721/10000 train_time:361322ms step_avg:41.43ms
+[2025-09-06 04:09:28] [Rank 0] step:8741/10000 train_time:362061ms step_avg:41.42ms
+[2025-09-06 04:09:29] [Rank 0] step:8761/10000 train_time:362800ms step_avg:41.41ms
+[2025-09-06 04:09:29] [Rank 0] step:8781/10000 train_time:363539ms step_avg:41.40ms
+[2025-09-06 04:09:30] [Rank 0] step:8801/10000 train_time:364277ms step_avg:41.39ms
+[2025-09-06 04:09:31] [Rank 0] step:8821/10000 train_time:365016ms step_avg:41.38ms
+[2025-09-06 04:09:32] [Rank 0] step:8841/10000 train_time:366366ms step_avg:41.44ms
+[2025-09-06 04:09:33] [Rank 0] step:8861/10000 train_time:367105ms step_avg:41.43ms
+[2025-09-06 04:09:34] [Rank 0] step:8881/10000 train_time:367843ms step_avg:41.42ms
+[2025-09-06 04:09:34] [Rank 0] step:8901/10000 train_time:368582ms step_avg:41.41ms
+[2025-09-06 04:09:35] [Rank 0] step:8921/10000 train_time:369320ms step_avg:41.40ms
+[2025-09-06 04:09:36] [Rank 0] step:8941/10000 train_time:370059ms step_avg:41.39ms
+[2025-09-06 04:09:37] [Rank 0] step:8961/10000 train_time:370798ms step_avg:41.38ms
+[2025-09-06 04:09:37] [Rank 0] step:8981/10000 train_time:371536ms step_avg:41.37ms
+[2025-09-06 04:09:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:09:38] [Rank 0] PRINT: step:9000/10000 train_loss:2.5592 val_loss:2.5445 train_time:372355ms step_avg:41.37ms
+[2025-09-06 04:09:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:09:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:11:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:11:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:11:01] [Rank 0] Total Loss: 4.9343
+[2025-09-06 04:11:01] [Rank 0] Total FTA (Unweighted): 0.2250
+[2025-09-06 04:11:01] [Rank 0] Total FTA (Weighted): 0.2250
+[2025-09-06 04:11:01] [Rank 0] Group 0 Loss: 3.2786
+[2025-09-06 04:11:01] [Rank 0] Group 1 Loss: 3.3375
+[2025-09-06 04:11:01] [Rank 0] Group 2 Loss: 3.3743
+[2025-09-06 04:11:01] [Rank 0] Group 3 Loss: 3.8417
+[2025-09-06 04:11:01] [Rank 0] Group 4 Loss: 4.3564
+[2025-09-06 04:11:01] [Rank 0] Group 5 Loss: 4.8724
+[2025-09-06 04:11:01] [Rank 0] Group 6 Loss: 5.1738
+[2025-09-06 04:11:01] [Rank 0] Group 7 Loss: 5.2880
+[2025-09-06 04:11:01] [Rank 0] Group 8 Loss: 5.5500
+[2025-09-06 04:11:01] [Rank 0] Group 9 Loss: 5.6850
+[2025-09-06 04:11:01] [Rank 0] Group 10 Loss: 5.6930
+[2025-09-06 04:11:01] [Rank 0] Group 11 Loss: 5.7719
+[2025-09-06 04:11:01] [Rank 0] Group 12 Loss: 5.6598
+[2025-09-06 04:11:01] [Rank 0] Group 13 Loss: 5.6703
+[2025-09-06 04:11:01] [Rank 0] Group 14 Loss: 5.7267
+[2025-09-06 04:11:01] [Rank 0] Group 15 Loss: 5.6693
+[2025-09-06 04:11:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 04:11:01] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 04:11:01] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:11:01] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 04:11:01] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 04:11:01] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 04:11:01] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 04:11:01] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 04:11:01] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 04:11:01] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 04:11:01] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 04:11:01] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:11:01] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 04:11:01] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:11:01] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-06 04:11:01] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 04:11:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:11:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:11:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:11:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:11:03] [Rank 0] step:9001/10000 train_time:372365ms step_avg:41.37ms
+[2025-09-06 04:11:03] [Rank 0] step:9021/10000 train_time:373052ms step_avg:41.35ms
+[2025-09-06 04:11:04] [Rank 0] step:9041/10000 train_time:373790ms step_avg:41.34ms
+[2025-09-06 04:11:05] [Rank 0] step:9061/10000 train_time:374530ms step_avg:41.33ms
+[2025-09-06 04:11:06] [Rank 0] step:9081/10000 train_time:375269ms step_avg:41.32ms
+[2025-09-06 04:11:06] [Rank 0] step:9101/10000 train_time:376008ms step_avg:41.32ms
+[2025-09-06 04:11:07] [Rank 0] step:9121/10000 train_time:376747ms step_avg:41.31ms
+[2025-09-06 04:11:08] [Rank 0] step:9141/10000 train_time:377485ms step_avg:41.30ms
+[2025-09-06 04:11:09] [Rank 0] step:9161/10000 train_time:378224ms step_avg:41.29ms
+[2025-09-06 04:11:09] [Rank 0] step:9181/10000 train_time:378962ms step_avg:41.28ms
+[2025-09-06 04:11:10] [Rank 0] step:9201/10000 train_time:379701ms step_avg:41.27ms
+[2025-09-06 04:11:11] [Rank 0] step:9221/10000 train_time:380440ms step_avg:41.26ms
+[2025-09-06 04:11:12] [Rank 0] step:9241/10000 train_time:381181ms step_avg:41.25ms
+[2025-09-06 04:11:12] [Rank 0] step:9261/10000 train_time:381920ms step_avg:41.24ms
+[2025-09-06 04:11:13] [Rank 0] step:9281/10000 train_time:382658ms step_avg:41.23ms
+[2025-09-06 04:11:14] [Rank 0] step:9301/10000 train_time:383397ms step_avg:41.22ms
+[2025-09-06 04:11:15] [Rank 0] step:9321/10000 train_time:384136ms step_avg:41.21ms
+[2025-09-06 04:11:15] [Rank 0] step:9341/10000 train_time:384875ms step_avg:41.20ms
+[2025-09-06 04:11:16] [Rank 0] step:9361/10000 train_time:385614ms step_avg:41.19ms
+[2025-09-06 04:11:17] [Rank 0] step:9381/10000 train_time:386353ms step_avg:41.18ms
+[2025-09-06 04:11:18] [Rank 0] step:9401/10000 train_time:387094ms step_avg:41.18ms
+[2025-09-06 04:11:18] [Rank 0] step:9421/10000 train_time:387832ms step_avg:41.17ms
+[2025-09-06 04:11:19] [Rank 0] step:9441/10000 train_time:388572ms step_avg:41.16ms
+[2025-09-06 04:11:20] [Rank 0] step:9461/10000 train_time:389311ms step_avg:41.15ms
+[2025-09-06 04:11:20] [Rank 0] step:9481/10000 train_time:390050ms step_avg:41.14ms
+[2025-09-06 04:11:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:11:22] [Rank 0] PRINT: step:9500/10000 train_loss:2.5458 val_loss:2.5328 train_time:390871ms step_avg:41.14ms
+[2025-09-06 04:11:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:11:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:12:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:12:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:12:45] [Rank 0] Total Loss: 4.9441
+[2025-09-06 04:12:45] [Rank 0] Total FTA (Unweighted): 0.2256
+[2025-09-06 04:12:45] [Rank 0] Total FTA (Weighted): 0.2256
+[2025-09-06 04:12:45] [Rank 0] Group 0 Loss: 3.2966
+[2025-09-06 04:12:45] [Rank 0] Group 1 Loss: 3.3607
+[2025-09-06 04:12:45] [Rank 0] Group 2 Loss: 3.3994
+[2025-09-06 04:12:45] [Rank 0] Group 3 Loss: 3.8531
+[2025-09-06 04:12:45] [Rank 0] Group 4 Loss: 4.3878
+[2025-09-06 04:12:45] [Rank 0] Group 5 Loss: 4.8827
+[2025-09-06 04:12:45] [Rank 0] Group 6 Loss: 5.1728
+[2025-09-06 04:12:45] [Rank 0] Group 7 Loss: 5.2920
+[2025-09-06 04:12:45] [Rank 0] Group 8 Loss: 5.5513
+[2025-09-06 04:12:45] [Rank 0] Group 9 Loss: 5.6846
+[2025-09-06 04:12:45] [Rank 0] Group 10 Loss: 5.6990
+[2025-09-06 04:12:45] [Rank 0] Group 11 Loss: 5.7769
+[2025-09-06 04:12:45] [Rank 0] Group 12 Loss: 5.6589
+[2025-09-06 04:12:45] [Rank 0] Group 13 Loss: 5.6794
+[2025-09-06 04:12:45] [Rank 0] Group 14 Loss: 5.7328
+[2025-09-06 04:12:45] [Rank 0] Group 15 Loss: 5.6770
+[2025-09-06 04:12:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 04:12:45] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 04:12:45] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:12:45] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 04:12:45] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 04:12:45] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 04:12:45] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 04:12:45] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 04:12:45] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 04:12:45] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 04:12:45] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 04:12:45] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:12:45] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 04:12:45] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:12:45] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 04:12:45] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 04:12:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:12:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:12:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:12:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:12:46] [Rank 0] step:9501/10000 train_time:390880ms step_avg:41.14ms
+[2025-09-06 04:12:47] [Rank 0] step:9521/10000 train_time:391554ms step_avg:41.13ms
+[2025-09-06 04:12:48] [Rank 0] step:9541/10000 train_time:392294ms step_avg:41.12ms
+[2025-09-06 04:12:48] [Rank 0] step:9561/10000 train_time:393033ms step_avg:41.11ms
+[2025-09-06 04:12:49] [Rank 0] step:9581/10000 train_time:393771ms step_avg:41.10ms
+[2025-09-06 04:12:50] [Rank 0] step:9601/10000 train_time:394509ms step_avg:41.09ms
+[2025-09-06 04:12:51] [Rank 0] step:9621/10000 train_time:395248ms step_avg:41.08ms
+[2025-09-06 04:12:51] [Rank 0] step:9641/10000 train_time:395987ms step_avg:41.07ms
+[2025-09-06 04:12:52] [Rank 0] step:9661/10000 train_time:396999ms step_avg:41.09ms
+[2025-09-06 04:12:53] [Rank 0] step:9681/10000 train_time:397856ms step_avg:41.10ms
+[2025-09-06 04:12:54] [Rank 0] step:9701/10000 train_time:398602ms step_avg:41.09ms
+[2025-09-06 04:12:55] [Rank 0] step:9721/10000 train_time:399341ms step_avg:41.08ms
+[2025-09-06 04:12:55] [Rank 0] step:9741/10000 train_time:400080ms step_avg:41.07ms
+[2025-09-06 04:12:56] [Rank 0] step:9761/10000 train_time:400957ms step_avg:41.08ms
+[2025-09-06 04:12:57] [Rank 0] step:9781/10000 train_time:401695ms step_avg:41.07ms
+[2025-09-06 04:12:58] [Rank 0] step:9801/10000 train_time:402434ms step_avg:41.06ms
+[2025-09-06 04:12:58] [Rank 0] step:9821/10000 train_time:403172ms step_avg:41.05ms
+[2025-09-06 04:12:59] [Rank 0] step:9841/10000 train_time:403912ms step_avg:41.04ms
+[2025-09-06 04:13:00] [Rank 0] step:9861/10000 train_time:404652ms step_avg:41.04ms
+[2025-09-06 04:13:01] [Rank 0] step:9881/10000 train_time:405390ms step_avg:41.03ms
+[2025-09-06 04:13:01] [Rank 0] step:9901/10000 train_time:406129ms step_avg:41.02ms
+[2025-09-06 04:13:02] [Rank 0] step:9921/10000 train_time:406868ms step_avg:41.01ms
+[2025-09-06 04:13:03] [Rank 0] step:9941/10000 train_time:407607ms step_avg:41.00ms
+[2025-09-06 04:13:04] [Rank 0] step:9961/10000 train_time:408347ms step_avg:40.99ms
+[2025-09-06 04:13:04] [Rank 0] step:9981/10000 train_time:409085ms step_avg:40.99ms
+[2025-09-06 04:13:05] [Rank 0] step:10000/10000 train_time:409793ms step_avg:40.98ms
+[2025-09-06 04:13:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:13:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:13:06] [Rank 0] PRINT: step:10000/10000 train_loss:2.5353 val_loss:2.5232 train_time:409917ms step_avg:40.99ms +[2025-09-06 04:13:06] [Rank 0] PRINT: step:10000/10000 train_loss:2.5353 val_loss:2.5232 train_time:409917ms step_avg:40.99ms +[2025-09-06 04:13:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 04:13:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 04:13:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 04:13:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 04:14:28] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 04:14:28] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 04:14:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 04:14:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 04:14:28] [Rank 0] Total Loss: 4.9303 +[2025-09-06 04:14:28] [Rank 0] Total Loss: 4.9303 +[2025-09-06 04:14:28] [Rank 0] Total FTA (Unweighted): 0.2275 +[2025-09-06 04:14:28] [Rank 0] Total FTA (Unweighted): 0.2275 +[2025-09-06 04:14:28] [Rank 0] Total FTA (Weighted): 0.2275 +[2025-09-06 04:14:28] [Rank 0] Total FTA (Weighted): 0.2275 +[2025-09-06 04:14:28] [Rank 0] Group 0 Loss: 3.2935 +[2025-09-06 04:14:28] [Rank 0] Group 0 Loss: 3.2935 +[2025-09-06 04:14:28] [Rank 0] Group 1 Loss: 3.3391 +[2025-09-06 04:14:28] [Rank 0] Group 1 Loss: 3.3391 +[2025-09-06 04:14:28] [Rank 0] Group 2 Loss: 3.3755 +[2025-09-06 04:14:28] [Rank 0] Group 2 Loss: 3.3755 +[2025-09-06 04:14:28] [Rank 0] Group 3 Loss: 3.8399 +[2025-09-06 04:14:28] [Rank 0] Group 3 Loss: 3.8399 +[2025-09-06 04:14:28] [Rank 0] Group 4 Loss: 4.3710 +[2025-09-06 04:14:28] [Rank 0] Group 4 Loss: 4.3710 +[2025-09-06 04:14:28] [Rank 0] Group 5 Loss: 4.8675 +[2025-09-06 04:14:28] [Rank 0] Group 5 Loss: 4.8675 +[2025-09-06 04:14:28] [Rank 0] Group 6 Loss: 5.1594 +[2025-09-06 04:14:28] [Rank 0] Group 6 Loss: 5.1594 +[2025-09-06 04:14:28] [Rank 0] Group 7 Loss: 5.2763 +[2025-09-06 04:14:28] [Rank 0] Group 7 Loss: 5.2763 +[2025-09-06 04:14:28] [Rank 0] Group 8 Loss: 5.5447 +[2025-09-06 04:14:28] [Rank 0] Group 8 Loss: 5.5447 +[2025-09-06 04:14:28] [Rank 0] Group 9 Loss: 5.6741 +[2025-09-06 04:14:28] [Rank 0] Group 9 Loss: 5.6741 +[2025-09-06 04:14:28] [Rank 0] Group 10 Loss: 5.6860 +[2025-09-06 04:14:28] [Rank 0] Group 10 Loss: 5.6860 +[2025-09-06 04:14:28] [Rank 0] Group 11 Loss: 5.7633 +[2025-09-06 04:14:28] [Rank 0] Group 11 Loss: 5.7633 +[2025-09-06 04:14:28] [Rank 0] Group 12 Loss: 5.6431 +[2025-09-06 04:14:28] [Rank 0] Group 12 Loss: 5.6431 +[2025-09-06 04:14:28] [Rank 0] Group 13 Loss: 5.6638 +[2025-09-06 04:14:28] [Rank 0] Group 13 Loss: 5.6638 +[2025-09-06 04:14:28] [Rank 0] Group 14 Loss: 5.7255 +[2025-09-06 04:14:28] [Rank 0] Group 14 Loss: 5.7255 +[2025-09-06 04:14:28] [Rank 0] Group 15 Loss: 5.6621 +[2025-09-06 04:14:28] [Rank 0] Group 15 Loss: 5.6621 +[2025-09-06 04:14:28] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 04:14:28] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 04:14:28] [Rank 0] Group 1 FTA: 0.5200 +[2025-09-06 04:14:28] [Rank 0] Group 1 FTA: 0.5200 +[2025-09-06 04:14:28] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 04:14:28] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 04:14:28] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 04:14:28] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 04:14:28] [Rank 0] Group 4 FTA: 0.1900 
+[2025-09-06 04:14:28] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 04:14:28] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 04:14:28] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 04:14:28] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 04:14:28] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 04:14:28] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 04:14:28] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-06 04:14:28] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 04:14:28] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 04:14:28] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 04:14:28] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-06 04:14:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_loss_curves.png
+[2025-09-06 04:14:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/per_class_acc_curves.png
+[2025-09-06 04:14:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_loss_curve.png
+[2025-09-06 04:14:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_44/total_acc_curve.png
+[2025-09-06 04:14:29] [Rank 0] step:10001/10000 train_time:409927ms step_avg:40.99ms
+[2025-09-06 04:14:29] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 04:14:29 2025 ---
+[2025-09-06 04:14:29] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b06ae00a61b54e2e4cf6d73a0becb647188b7a75
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/config.json
@@ -0,0 +1,29 @@
+{
+    "cli_args": {
+        "unet": false,
+ "seed": 45, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.05, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "7e47fe9e-5aa9-4cff-b9cb-fe41d1583cf9", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 
894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 
425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 
617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 
898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]}
\ No newline at end of file
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..e5792cf314e6adc2dfeb8d67650b8df9b8ed463c
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5fbd4efd43f9862e1732166fe5c73e5b79ad01eef016d3dcf39211b08c7c852
+size 217238
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..ba6c72aea65a59e6f57e58eca1712f5669cbcc8f
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aa420b1029936e97a804548ed8ecd0e82d7768ce270654245b7af2c98fa02f2e
+size 397275
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..ca1e72d57d6ae7b5be01bfa0d8991aafde934aa4
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43dd4382548690815cd6fe509eea5b5e12c464de2deeedf05e740b7449656f5c
+size 78804
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..ebd83cc7a6c76425acf37aa6ae4f4c4b06422d93
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9546edcebb2b95b99b97ae51718f11656f57204c43ab8be1aea456fb42a8183b
+size 116454
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/training_log_7e47fe9e-5aa9-4cff-b9cb-fe41d1583cf9.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/training_log_7e47fe9e-5aa9-4cff-b9cb-fe41d1583cf9.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6fa058df8a306689a9fecbe28027c5ca2d236bf1
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/training_log_7e47fe9e-5aa9-4cff-b9cb-fe41d1583cf9.txt
@@ -0,0 +1,3678 @@
+[2025-09-06 04:14:52] [Rank 0] PRINT: --- Script Start: Sat Sep 6 04:14:52 2025 ---
+[2025-09-06 04:14:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.05, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-06 04:14:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-06 04:14:52] [Rank 0] PRINT: Using fixed seed: 45
+[2025-09-06 04:14:52] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45
+[2025-09-06 04:14:52] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
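+
+# Illustrative aside (not part of the original run script): set_seed above makes
+# initialization and data order reproducible, but CUDA kernels can still be
+# nondeterministic. A stricter, assumed variant would additionally set:
+#   torch.backends.cudnn.deterministic = True
+#   torch.backends.cudnn.benchmark = False
+#   torch.use_deterministic_algorithms(True)  # may raise on unsupported ops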
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # cycle over shards so multi-epoch training wraps around automatically
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn, MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn, MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (no Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: pure SGD+momentum on all parameters (uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QK+V Attn); "
+                         "13: Muon(W_O, W_2 MLP)/Adam(QK+V Attn, W_1 MLP); "
+                         "14: Muon(W_O)/Adam(QK+V Attn, MLP); "
+                         "15: Muon(W_V)/Adam(QK+O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Write each message exactly once. An earlier version of this function
+        # repeated the write unconditionally below this block, which is why the
+        # training logs in this directory show every line duplicated.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+
+    # Two methods for calculating total accuracy
+    total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0  # Original method: weighted by samples
+    total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0  # New method: simple average across groups
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc_weighted': total_acc_weighted,  # Sample-weighted total accuracy
+        'total_acc_unweighted': total_acc_unweighted,  # Simple average total accuracy across groups
+        'total_acc': total_acc_unweighted  # Primarily use simple average method
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)  # history is keyed by training step (cf. plot_loss_curves below)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
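+# Worked example of the group structure used throughout these evaluations (a
+# sketch; the numbers follow from generate_powerlaw_selection_counts above):
+# for m = 15, group 0 holds 1 class with 2**15 = 32768 samples, and each group
+# g >= 1 holds 2**(g-1) classes with 2**(15-g) samples per class, i.e. a
+# constant 2**14 = 16384 samples per group. Higher group ids therefore mean
+# more classes but fewer samples each, which is why per-group loss rises and
+# FTA falls from Group 0 to Group 15 in the logs above.
+#
+#   counts, groups = generate_powerlaw_selection_counts(15)
+#   assert len(counts) == 2**15                      # 32768 classes in total
+#   assert sum(counts.values()) == 2**15 + 15 * 2**14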
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
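+    # Modes 11 and 12 are not defined in this script; the chain skips from 10
+    # to 13, and any unlisted mode falls through to the ValueError below.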
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  #add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in the attention class of this parameterization
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
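+    # [Editorial note] In the gated parameterization the "W_1" group contains both
+    # c_fc and the gate projection c_up, so modes that send W_1 to Adam (e.g. 6, 8,
+    # 13) also send the gate weights to Adam.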
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr  # LR for Muon (needed below when the Muon optimizer is created)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters; no Muon or Adam.
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add the group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # optionally add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert; it would fail on the last step when step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x = 1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the eager 'model'; the compiled handle is stored as 'model_compiled'.
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
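+# [Editorial note] The loop below runs a few dummy steps to trigger TorchInductor
+# compilation and NCCL kernel setup; model and optimizer state were snapshotted
+# above and are restored afterwards, so the warmup does not affect training.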
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter used when building the fixed eval set: the text must parse as "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, keeping only samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
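+    # [Editorial note, worked example; see generate_powerlaw_selection_counts] With
+    # m = 15 the groups are: group 0 -> 1 class with 2**15 = 32768 samples; group
+    # g >= 1 -> 2**(g-1) classes with 2**(15-g) samples each (so group 15 has 16384
+    # classes with 1 sample apiece). The fixed eval set draws up to PER_GROUP_K
+    # parseable QA lines per *group*, giving every frequency band equal weight.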
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) is not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
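+        # [Editorial note] val_loss_avg is the mean over completed validation steps on
+        # each rank, then all-reduced with ReduceOp.AVG across ranks, so the logged
+        # val_loss is the global mean per-step validation loss.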
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use the simple (unweighted) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message 
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:  # Muon on W_V; Adam on O Attn, QK Attn, MLP
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:  # Muon on QKV; Adam on O Attn, MLP
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
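+# Quick reference for the optimizer_mode branches above (Muon matrices / Adam matrices);
+# embeddings, lm_head and scalar parameters always go to Adam, except in mode 9:
+#   0: QKVO + MLP / none           8: VO + W_2 / QK + W_1
+#   1: QK / VO + MLP               9: pure SGD + momentum on everything
+#   2: VO / QK + MLP              10: W_O + MLP / V + QK
+#   3: QKVO / MLP                 13: W_O + W_2 / QK + V + W_1
+#   4: MLP / QKVO                 14: W_O / QK + V + MLP
+#   5: none / QKVO + MLP          15: W_V / QK + W_O + MLP
+#   6: W_2 / Attn + W_1           16: QKV / W_O + MLP
+#   7: VO + MLP / QK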
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP; Adam on V Attn, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:  # Muon on W_2, W_O; Adam on V Attn, QK Attn, W_1
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:  # Muon on W_O; Adam on V Attn, QK Attn, MLP
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:  # Muon on W_V; Adam on O Attn, QK Attn, MLP
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:  # Muon on QKV; Adam on O Attn, MLP
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
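+# Note: this "gated" branch mirrors the "qkvo" branch above; the only structural
+# difference is the extra per-block c_up matrix (mlp_up_params), which is grouped
+# with c_fc as W_1 and included in all_mlp_matrices, so the W_1/W_2 modes (6, 8, 13)
+# treat both input projections of the gated MLP as W_1.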
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert; would fail on the last step when step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x = 1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the eager 'model'; 'model_compiled' is the handle used from here on
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    # PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter used when building the fixed eval set: keep only text that parses as "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, collecting only samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # Already defined above: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    # QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    # M_FOR_POWERLAW = 15
+    # NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                # num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
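+# A worked sketch of the two schedules above (num_iterations = 10000 as in this run;
+# cooldown_frac = 0.8 is an assumed example value): get_lr() returns 1.0 while
+# x = step/num_iterations < 1 - cooldown_frac, then decays the multiplier linearly to 0.1:
+#   step     0: x = 0.00 -> lr multiplier 1.00   (stable phase)
+#   step  2000: x = 0.20 -> w = 1.00 -> 1.00     (decay begins)
+#   step  6000: x = 0.60 -> w = 0.50 -> 0.5*1.0 + 0.5*0.1 = 0.55
+#   step 10000: x = 1.00 -> w = 0.00 -> 0.10
+# Muon's momentum (when optimizer2 exists) warms up over the first 300 steps:
+#   step 0 -> 0.85, step 150 -> 0.90, step >= 300 -> 0.95.
+# The fixed eval set samples PER_GROUP_K = 100 indices for each of the 16 groups
+# (m_val = 15), i.e. the "Fixed-eval set loaded with 1600 samples" lines in the log below.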
+[2025-09-06 04:14:53] [Rank 0] PRINT: Testing model forward function: +[2025-09-06 04:14:53] [Rank 0] PRINT: Testing model forward function: +[2025-09-06 04:14:57] [Rank 0] PRINT: Model test - Result type: +[2025-09-06 04:14:57] [Rank 0] PRINT: Model test - Result type: +[2025-09-06 04:14:57] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-06 04:14:57] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-06 04:14:57] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-06 04:14:57] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-06 04:14:57] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-06 04:14:57] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-06 04:14:58] [Rank 0] PRINT: Model returns: +[2025-09-06 04:14:58] [Rank 0] PRINT: Model returns: +[2025-09-06 04:14:58] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-06 04:14:58] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-06 04:14:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9 +[2025-09-06 04:14:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9 +[2025-09-06 04:14:58] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.05). +[2025-09-06 04:14:58] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.05). +[2025-09-06 04:14:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-09-06 04:14:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-09-06 04:14:58] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-06 04:14:58] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-06 04:15:02] [Rank 0] PRINT: Model compilation complete. +[2025-09-06 04:15:02] [Rank 0] PRINT: Model compilation complete. +[2025-09-06 04:15:02] [Rank 0] PRINT: Starting warmup... +[2025-09-06 04:15:02] [Rank 0] PRINT: Starting warmup... +[2025-09-06 04:15:39] [Rank 0] PRINT: Warmup complete. +[2025-09-06 04:15:39] [Rank 0] PRINT: Warmup complete. +[2025-09-06 04:15:39] [Rank 0] PRINT: Starting training... +[2025-09-06 04:15:39] [Rank 0] PRINT: Starting training... +[2025-09-06 04:15:46] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/fixed_eval_indices.json +[2025-09-06 04:15:46] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/fixed_eval_indices.json +[2025-09-06 04:15:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:15:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 04:15:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-06 04:15:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-06 04:16:22] [Rank 0] step:21/10000 train_time:32559ms step_avg:1550.41ms +[2025-09-06 04:16:22] [Rank 0] step:21/10000 train_time:32559ms step_avg:1550.41ms +[2025-09-06 04:16:23] [Rank 0] step:41/10000 train_time:33286ms step_avg:811.86ms +[2025-09-06 04:16:23] [Rank 0] step:41/10000 train_time:33286ms step_avg:811.86ms +[2025-09-06 04:16:24] [Rank 0] step:61/10000 train_time:34012ms step_avg:557.58ms +[2025-09-06 04:16:24] [Rank 0] step:61/10000 train_time:34012ms step_avg:557.58ms +[2025-09-06 04:16:24] [Rank 0] step:81/10000 train_time:34739ms step_avg:428.87ms +[2025-09-06 04:16:24] [Rank 0] step:81/10000 train_time:34739ms step_avg:428.87ms +[2025-09-06 04:16:25] [Rank 0] step:101/10000 train_time:35466ms step_avg:351.15ms +[2025-09-06 04:16:25] [Rank 0] step:101/10000 train_time:35466ms step_avg:351.15ms +[2025-09-06 04:16:26] [Rank 0] step:121/10000 train_time:36193ms step_avg:299.11ms +[2025-09-06 04:16:26] [Rank 0] step:121/10000 train_time:36193ms step_avg:299.11ms +[2025-09-06 04:16:27] [Rank 0] step:141/10000 train_time:36919ms step_avg:261.84ms +[2025-09-06 04:16:27] [Rank 0] step:141/10000 train_time:36919ms step_avg:261.84ms +[2025-09-06 04:16:27] [Rank 0] step:161/10000 train_time:37646ms step_avg:233.83ms +[2025-09-06 04:16:27] [Rank 0] step:161/10000 train_time:37646ms step_avg:233.83ms +[2025-09-06 04:16:28] [Rank 0] step:181/10000 train_time:38373ms step_avg:212.00ms +[2025-09-06 04:16:28] [Rank 0] step:181/10000 train_time:38373ms step_avg:212.00ms +[2025-09-06 04:16:29] [Rank 0] step:201/10000 train_time:39099ms step_avg:194.52ms +[2025-09-06 04:16:29] [Rank 0] step:201/10000 train_time:39099ms step_avg:194.52ms +[2025-09-06 04:16:30] [Rank 0] step:221/10000 train_time:39824ms step_avg:180.20ms +[2025-09-06 04:16:30] [Rank 0] step:221/10000 train_time:39824ms step_avg:180.20ms +[2025-09-06 04:16:30] [Rank 0] step:241/10000 train_time:40552ms step_avg:168.27ms +[2025-09-06 04:16:30] [Rank 0] step:241/10000 train_time:40552ms step_avg:168.27ms +[2025-09-06 04:16:31] [Rank 0] step:261/10000 train_time:41279ms step_avg:158.16ms +[2025-09-06 04:16:31] [Rank 0] step:261/10000 train_time:41279ms step_avg:158.16ms +[2025-09-06 04:16:32] [Rank 0] step:281/10000 train_time:42005ms step_avg:149.48ms +[2025-09-06 04:16:32] [Rank 0] step:281/10000 train_time:42005ms step_avg:149.48ms +[2025-09-06 04:16:32] [Rank 0] step:301/10000 train_time:42732ms step_avg:141.97ms +[2025-09-06 04:16:32] [Rank 0] step:301/10000 train_time:42732ms step_avg:141.97ms +[2025-09-06 04:16:33] [Rank 0] step:321/10000 train_time:43459ms step_avg:135.39ms +[2025-09-06 04:16:33] [Rank 0] step:321/10000 train_time:43459ms step_avg:135.39ms +[2025-09-06 04:16:34] [Rank 0] step:341/10000 train_time:44186ms step_avg:129.58ms +[2025-09-06 04:16:34] [Rank 0] step:341/10000 train_time:44186ms step_avg:129.58ms +[2025-09-06 04:16:35] [Rank 0] step:361/10000 train_time:44912ms step_avg:124.41ms +[2025-09-06 04:16:35] [Rank 0] step:361/10000 train_time:44912ms step_avg:124.41ms +[2025-09-06 04:16:35] [Rank 0] step:381/10000 train_time:45638ms step_avg:119.79ms +[2025-09-06 04:16:35] [Rank 0] step:381/10000 train_time:45638ms step_avg:119.79ms +[2025-09-06 04:16:36] [Rank 0] step:401/10000 train_time:46366ms step_avg:115.62ms +[2025-09-06 04:16:36] [Rank 0] step:401/10000 train_time:46366ms step_avg:115.62ms +[2025-09-06 04:16:37] [Rank 0] 
step:421/10000 train_time:47091ms step_avg:111.86ms +[2025-09-06 04:16:37] [Rank 0] step:421/10000 train_time:47091ms step_avg:111.86ms +[2025-09-06 04:16:38] [Rank 0] step:441/10000 train_time:47818ms step_avg:108.43ms +[2025-09-06 04:16:38] [Rank 0] step:441/10000 train_time:47818ms step_avg:108.43ms +[2025-09-06 04:16:38] [Rank 0] step:461/10000 train_time:48544ms step_avg:105.30ms +[2025-09-06 04:16:38] [Rank 0] step:461/10000 train_time:48544ms step_avg:105.30ms +[2025-09-06 04:16:39] [Rank 0] step:481/10000 train_time:49270ms step_avg:102.43ms +[2025-09-06 04:16:39] [Rank 0] step:481/10000 train_time:49270ms step_avg:102.43ms +[2025-09-06 04:16:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:16:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:16:40] [Rank 0] PRINT: step:500/10000 train_loss:6.7859 val_loss:4.9735 train_time:50076ms step_avg:100.15ms +[2025-09-06 04:16:40] [Rank 0] PRINT: step:500/10000 train_loss:6.7859 val_loss:4.9735 train_time:50076ms step_avg:100.15ms +[2025-09-06 04:16:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 04:16:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 04:16:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 04:16:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 04:18:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 04:18:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 04:18:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 04:18:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 04:18:02] [Rank 0] Total Loss: 6.6942 +[2025-09-06 04:18:02] [Rank 0] Total Loss: 6.6942 +[2025-09-06 04:18:02] [Rank 0] Total FTA (Unweighted): 0.0281 +[2025-09-06 04:18:02] [Rank 0] Total FTA (Unweighted): 0.0281 +[2025-09-06 04:18:02] [Rank 0] Total FTA (Weighted): 0.0281 +[2025-09-06 04:18:02] [Rank 0] Total FTA (Weighted): 0.0281 +[2025-09-06 04:18:02] [Rank 0] Group 0 Loss: 4.5333 +[2025-09-06 04:18:02] [Rank 0] Group 0 Loss: 4.5333 +[2025-09-06 04:18:02] [Rank 0] Group 1 Loss: 5.4076 +[2025-09-06 04:18:02] [Rank 0] Group 1 Loss: 5.4076 +[2025-09-06 04:18:02] [Rank 0] Group 2 Loss: 5.9640 +[2025-09-06 04:18:02] [Rank 0] Group 2 Loss: 5.9640 +[2025-09-06 04:18:02] [Rank 0] Group 3 Loss: 6.4842 +[2025-09-06 04:18:02] [Rank 0] Group 3 Loss: 6.4842 +[2025-09-06 04:18:02] [Rank 0] Group 4 Loss: 6.8873 +[2025-09-06 04:18:02] [Rank 0] Group 4 Loss: 6.8873 +[2025-09-06 04:18:02] [Rank 0] Group 5 Loss: 6.9856 +[2025-09-06 04:18:02] [Rank 0] Group 5 Loss: 6.9856 +[2025-09-06 04:18:02] [Rank 0] Group 6 Loss: 7.0541 +[2025-09-06 04:18:02] [Rank 0] Group 6 Loss: 7.0541 +[2025-09-06 04:18:02] [Rank 0] Group 7 Loss: 6.9352 +[2025-09-06 04:18:02] [Rank 0] Group 7 Loss: 6.9352 +[2025-09-06 04:18:02] [Rank 0] Group 8 Loss: 7.0680 +[2025-09-06 04:18:02] [Rank 0] Group 8 Loss: 7.0680 +[2025-09-06 04:18:02] [Rank 0] Group 9 Loss: 7.1466 +[2025-09-06 04:18:02] [Rank 0] Group 9 Loss: 7.1466 +[2025-09-06 04:18:02] [Rank 0] Group 10 Loss: 7.1403 +[2025-09-06 04:18:02] [Rank 0] Group 10 Loss: 7.1403 +[2025-09-06 04:18:02] [Rank 0] Group 11 Loss: 7.1993 +[2025-09-06 04:18:02] [Rank 0] Group 11 Loss: 7.1993 +[2025-09-06 04:18:02] [Rank 0] Group 12 Loss: 7.0290 +[2025-09-06 04:18:02] [Rank 0] Group 12 Loss: 7.0290 
+[2025-09-06 04:18:02] [Rank 0] Group 13 Loss: 7.0611 +[2025-09-06 04:18:02] [Rank 0] Group 13 Loss: 7.0611 +[2025-09-06 04:18:02] [Rank 0] Group 14 Loss: 7.1606 +[2025-09-06 04:18:02] [Rank 0] Group 14 Loss: 7.1606 +[2025-09-06 04:18:02] [Rank 0] Group 15 Loss: 7.0511 +[2025-09-06 04:18:02] [Rank 0] Group 15 Loss: 7.0511 +[2025-09-06 04:18:02] [Rank 0] Group 0 FTA: 0.0000 +[2025-09-06 04:18:02] [Rank 0] Group 0 FTA: 0.0000 +[2025-09-06 04:18:02] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 04:18:02] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 04:18:02] [Rank 0] Group 2 FTA: 0.0000 +[2025-09-06 04:18:02] [Rank 0] Group 2 FTA: 0.0000 +[2025-09-06 04:18:02] [Rank 0] Group 3 FTA: 0.0000 +[2025-09-06 04:18:02] [Rank 0] Group 3 FTA: 0.0000 +[2025-09-06 04:18:02] [Rank 0] Group 4 FTA: 0.0100 +[2025-09-06 04:18:02] [Rank 0] Group 4 FTA: 0.0100 +[2025-09-06 04:18:02] [Rank 0] Group 5 FTA: 0.0000 +[2025-09-06 04:18:02] [Rank 0] Group 5 FTA: 0.0000 +[2025-09-06 04:18:02] [Rank 0] Group 6 FTA: 0.0200 +[2025-09-06 04:18:02] [Rank 0] Group 6 FTA: 0.0200 +[2025-09-06 04:18:02] [Rank 0] Group 7 FTA: 0.0300 +[2025-09-06 04:18:02] [Rank 0] Group 7 FTA: 0.0300 +[2025-09-06 04:18:02] [Rank 0] Group 8 FTA: 0.0400 +[2025-09-06 04:18:02] [Rank 0] Group 8 FTA: 0.0400 +[2025-09-06 04:18:02] [Rank 0] Group 9 FTA: 0.0200 +[2025-09-06 04:18:02] [Rank 0] Group 9 FTA: 0.0200 +[2025-09-06 04:18:02] [Rank 0] Group 10 FTA: 0.0200 +[2025-09-06 04:18:02] [Rank 0] Group 10 FTA: 0.0200 +[2025-09-06 04:18:02] [Rank 0] Group 11 FTA: 0.0300 +[2025-09-06 04:18:02] [Rank 0] Group 11 FTA: 0.0300 +[2025-09-06 04:18:02] [Rank 0] Group 12 FTA: 0.0100 +[2025-09-06 04:18:02] [Rank 0] Group 12 FTA: 0.0100 +[2025-09-06 04:18:02] [Rank 0] Group 13 FTA: 0.0200 +[2025-09-06 04:18:02] [Rank 0] Group 13 FTA: 0.0200 +[2025-09-06 04:18:02] [Rank 0] Group 14 FTA: 0.0100 +[2025-09-06 04:18:02] [Rank 0] Group 14 FTA: 0.0100 +[2025-09-06 04:18:02] [Rank 0] Group 15 FTA: 0.0400 +[2025-09-06 04:18:02] [Rank 0] Group 15 FTA: 0.0400 +[2025-09-06 04:18:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png +[2025-09-06 04:18:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png +[2025-09-06 04:18:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png +[2025-09-06 04:18:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png +[2025-09-06 04:18:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png +[2025-09-06 04:18:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png +[2025-09-06 04:18:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png +[2025-09-06 04:18:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png +[2025-09-06 04:18:04] [Rank 0] step:501/10000 train_time:50085ms step_avg:99.97ms +[2025-09-06 04:18:04] [Rank 0] step:501/10000 train_time:50085ms step_avg:99.97ms +[2025-09-06 
04:18:04] [Rank 0] step:521/10000 train_time:50752ms step_avg:97.41ms +[2025-09-06 04:18:04] [Rank 0] step:521/10000 train_time:50752ms step_avg:97.41ms +[2025-09-06 04:18:05] [Rank 0] step:541/10000 train_time:51478ms step_avg:95.15ms +[2025-09-06 04:18:05] [Rank 0] step:541/10000 train_time:51478ms step_avg:95.15ms +[2025-09-06 04:18:06] [Rank 0] step:561/10000 train_time:52205ms step_avg:93.06ms +[2025-09-06 04:18:06] [Rank 0] step:561/10000 train_time:52205ms step_avg:93.06ms +[2025-09-06 04:18:07] [Rank 0] step:581/10000 train_time:52932ms step_avg:91.10ms +[2025-09-06 04:18:07] [Rank 0] step:581/10000 train_time:52932ms step_avg:91.10ms +[2025-09-06 04:18:07] [Rank 0] step:601/10000 train_time:53658ms step_avg:89.28ms +[2025-09-06 04:18:07] [Rank 0] step:601/10000 train_time:53658ms step_avg:89.28ms +[2025-09-06 04:18:08] [Rank 0] step:621/10000 train_time:54384ms step_avg:87.57ms +[2025-09-06 04:18:08] [Rank 0] step:621/10000 train_time:54384ms step_avg:87.57ms +[2025-09-06 04:18:09] [Rank 0] step:641/10000 train_time:55110ms step_avg:85.97ms +[2025-09-06 04:18:09] [Rank 0] step:641/10000 train_time:55110ms step_avg:85.97ms +[2025-09-06 04:18:10] [Rank 0] step:661/10000 train_time:56002ms step_avg:84.72ms +[2025-09-06 04:18:10] [Rank 0] step:661/10000 train_time:56002ms step_avg:84.72ms +[2025-09-06 04:18:10] [Rank 0] step:681/10000 train_time:56728ms step_avg:83.30ms +[2025-09-06 04:18:10] [Rank 0] step:681/10000 train_time:56728ms step_avg:83.30ms +[2025-09-06 04:18:11] [Rank 0] step:701/10000 train_time:57455ms step_avg:81.96ms +[2025-09-06 04:18:11] [Rank 0] step:701/10000 train_time:57455ms step_avg:81.96ms +[2025-09-06 04:18:12] [Rank 0] step:721/10000 train_time:58332ms step_avg:80.90ms +[2025-09-06 04:18:12] [Rank 0] step:721/10000 train_time:58332ms step_avg:80.90ms +[2025-09-06 04:18:13] [Rank 0] step:741/10000 train_time:59058ms step_avg:79.70ms +[2025-09-06 04:18:13] [Rank 0] step:741/10000 train_time:59058ms step_avg:79.70ms +[2025-09-06 04:18:13] [Rank 0] step:761/10000 train_time:59788ms step_avg:78.57ms +[2025-09-06 04:18:13] [Rank 0] step:761/10000 train_time:59788ms step_avg:78.57ms +[2025-09-06 04:18:14] [Rank 0] step:781/10000 train_time:60520ms step_avg:77.49ms +[2025-09-06 04:18:14] [Rank 0] step:781/10000 train_time:60520ms step_avg:77.49ms +[2025-09-06 04:18:15] [Rank 0] step:801/10000 train_time:61254ms step_avg:76.47ms +[2025-09-06 04:18:15] [Rank 0] step:801/10000 train_time:61254ms step_avg:76.47ms +[2025-09-06 04:18:16] [Rank 0] step:821/10000 train_time:62598ms step_avg:76.25ms +[2025-09-06 04:18:16] [Rank 0] step:821/10000 train_time:62598ms step_avg:76.25ms +[2025-09-06 04:18:17] [Rank 0] step:841/10000 train_time:63328ms step_avg:75.30ms +[2025-09-06 04:18:17] [Rank 0] step:841/10000 train_time:63328ms step_avg:75.30ms +[2025-09-06 04:18:18] [Rank 0] step:861/10000 train_time:64062ms step_avg:74.40ms +[2025-09-06 04:18:18] [Rank 0] step:861/10000 train_time:64062ms step_avg:74.40ms +[2025-09-06 04:18:18] [Rank 0] step:881/10000 train_time:64792ms step_avg:73.54ms +[2025-09-06 04:18:18] [Rank 0] step:881/10000 train_time:64792ms step_avg:73.54ms +[2025-09-06 04:18:19] [Rank 0] step:901/10000 train_time:65523ms step_avg:72.72ms +[2025-09-06 04:18:19] [Rank 0] step:901/10000 train_time:65523ms step_avg:72.72ms +[2025-09-06 04:18:20] [Rank 0] step:921/10000 train_time:66253ms step_avg:71.94ms +[2025-09-06 04:18:20] [Rank 0] step:921/10000 train_time:66253ms step_avg:71.94ms +[2025-09-06 04:18:21] [Rank 0] step:941/10000 train_time:66991ms 
step_avg:71.19ms +[2025-09-06 04:18:21] [Rank 0] step:941/10000 train_time:66991ms step_avg:71.19ms +[2025-09-06 04:18:21] [Rank 0] step:961/10000 train_time:67723ms step_avg:70.47ms +[2025-09-06 04:18:21] [Rank 0] step:961/10000 train_time:67723ms step_avg:70.47ms +[2025-09-06 04:18:22] [Rank 0] step:981/10000 train_time:68454ms step_avg:69.78ms +[2025-09-06 04:18:22] [Rank 0] step:981/10000 train_time:68454ms step_avg:69.78ms +[2025-09-06 04:18:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:18:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:18:23] [Rank 0] PRINT: step:1000/10000 train_loss:4.4568 val_loss:4.0643 train_time:69264ms step_avg:69.26ms +[2025-09-06 04:18:23] [Rank 0] PRINT: step:1000/10000 train_loss:4.4568 val_loss:4.0643 train_time:69264ms step_avg:69.26ms +[2025-09-06 04:18:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 04:18:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 04:18:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 04:18:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 04:19:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 04:19:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 04:19:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 04:19:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 04:19:44] [Rank 0] Total Loss: 5.9907 +[2025-09-06 04:19:44] [Rank 0] Total Loss: 5.9907 +[2025-09-06 04:19:44] [Rank 0] Total FTA (Unweighted): 0.0831 +[2025-09-06 04:19:44] [Rank 0] Total FTA (Unweighted): 0.0831 +[2025-09-06 04:19:44] [Rank 0] Total FTA (Weighted): 0.0831 +[2025-09-06 04:19:44] [Rank 0] Total FTA (Weighted): 0.0831 +[2025-09-06 04:19:44] [Rank 0] Group 0 Loss: 3.6930 +[2025-09-06 04:19:44] [Rank 0] Group 0 Loss: 3.6930 +[2025-09-06 04:19:44] [Rank 0] Group 1 Loss: 3.8136 +[2025-09-06 04:19:44] [Rank 0] Group 1 Loss: 3.8136 +[2025-09-06 04:19:44] [Rank 0] Group 2 Loss: 4.6170 +[2025-09-06 04:19:44] [Rank 0] Group 2 Loss: 4.6170 +[2025-09-06 04:19:44] [Rank 0] Group 3 Loss: 5.3823 +[2025-09-06 04:19:44] [Rank 0] Group 3 Loss: 5.3823 +[2025-09-06 04:19:44] [Rank 0] Group 4 Loss: 6.1753 +[2025-09-06 04:19:44] [Rank 0] Group 4 Loss: 6.1753 +[2025-09-06 04:19:44] [Rank 0] Group 5 Loss: 6.3548 +[2025-09-06 04:19:44] [Rank 0] Group 5 Loss: 6.3548 +[2025-09-06 04:19:44] [Rank 0] Group 6 Loss: 6.4180 +[2025-09-06 04:19:44] [Rank 0] Group 6 Loss: 6.4180 +[2025-09-06 04:19:44] [Rank 0] Group 7 Loss: 6.4135 +[2025-09-06 04:19:44] [Rank 0] Group 7 Loss: 6.4135 +[2025-09-06 04:19:44] [Rank 0] Group 8 Loss: 6.5520 +[2025-09-06 04:19:44] [Rank 0] Group 8 Loss: 6.5520 +[2025-09-06 04:19:44] [Rank 0] Group 9 Loss: 6.6822 +[2025-09-06 04:19:44] [Rank 0] Group 9 Loss: 6.6822 +[2025-09-06 04:19:44] [Rank 0] Group 10 Loss: 6.6399 +[2025-09-06 04:19:44] [Rank 0] Group 10 Loss: 6.6399 +[2025-09-06 04:19:44] [Rank 0] Group 11 Loss: 6.7429 +[2025-09-06 04:19:44] [Rank 0] Group 11 Loss: 6.7429 +[2025-09-06 04:19:44] [Rank 0] Group 12 Loss: 6.5660 +[2025-09-06 04:19:44] [Rank 0] Group 12 Loss: 6.5660 +[2025-09-06 04:19:44] [Rank 0] Group 13 Loss: 6.5602 +[2025-09-06 04:19:44] [Rank 0] Group 13 Loss: 6.5602 +[2025-09-06 04:19:44] [Rank 0] Group 14 Loss: 6.6708 +[2025-09-06 04:19:44] [Rank 0] Group 14 Loss: 6.6708 
+[2025-09-06 04:19:44] [Rank 0] Group 15 Loss: 6.5691 +[2025-09-06 04:19:44] [Rank 0] Group 15 Loss: 6.5691 +[2025-09-06 04:19:44] [Rank 0] Group 0 FTA: 0.0000 +[2025-09-06 04:19:44] [Rank 0] Group 0 FTA: 0.0000 +[2025-09-06 04:19:44] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 04:19:44] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 04:19:44] [Rank 0] Group 2 FTA: 0.0700 +[2025-09-06 04:19:44] [Rank 0] Group 2 FTA: 0.0700 +[2025-09-06 04:19:44] [Rank 0] Group 3 FTA: 0.0800 +[2025-09-06 04:19:44] [Rank 0] Group 3 FTA: 0.0800 +[2025-09-06 04:19:44] [Rank 0] Group 4 FTA: 0.0300 +[2025-09-06 04:19:44] [Rank 0] Group 4 FTA: 0.0300 +[2025-09-06 04:19:44] [Rank 0] Group 5 FTA: 0.0600 +[2025-09-06 04:19:44] [Rank 0] Group 5 FTA: 0.0600 +[2025-09-06 04:19:44] [Rank 0] Group 6 FTA: 0.0600 +[2025-09-06 04:19:44] [Rank 0] Group 6 FTA: 0.0600 +[2025-09-06 04:19:44] [Rank 0] Group 7 FTA: 0.0700 +[2025-09-06 04:19:44] [Rank 0] Group 7 FTA: 0.0700 +[2025-09-06 04:19:44] [Rank 0] Group 8 FTA: 0.1200 +[2025-09-06 04:19:44] [Rank 0] Group 8 FTA: 0.1200 +[2025-09-06 04:19:44] [Rank 0] Group 9 FTA: 0.0800 +[2025-09-06 04:19:44] [Rank 0] Group 9 FTA: 0.0800 +[2025-09-06 04:19:44] [Rank 0] Group 10 FTA: 0.0700 +[2025-09-06 04:19:44] [Rank 0] Group 10 FTA: 0.0700 +[2025-09-06 04:19:44] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-06 04:19:44] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-06 04:19:44] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 04:19:44] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 04:19:44] [Rank 0] Group 13 FTA: 0.1000 +[2025-09-06 04:19:44] [Rank 0] Group 13 FTA: 0.1000 +[2025-09-06 04:19:44] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 04:19:44] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 04:19:44] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 04:19:44] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 04:19:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png +[2025-09-06 04:19:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png +[2025-09-06 04:19:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png +[2025-09-06 04:19:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png +[2025-09-06 04:19:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png +[2025-09-06 04:19:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png +[2025-09-06 04:19:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png +[2025-09-06 04:19:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png +[2025-09-06 04:19:46] [Rank 0] step:1001/10000 train_time:69274ms step_avg:69.20ms +[2025-09-06 04:19:46] [Rank 0] step:1001/10000 train_time:69274ms step_avg:69.20ms +[2025-09-06 04:19:47] [Rank 0] step:1021/10000 train_time:69931ms step_avg:68.49ms +[2025-09-06 04:19:47] [Rank 0] step:1021/10000 train_time:69931ms step_avg:68.49ms +[2025-09-06 04:19:48] [Rank 0] step:1041/10000 
train_time:70662ms step_avg:67.88ms +[2025-09-06 04:19:48] [Rank 0] step:1041/10000 train_time:70662ms step_avg:67.88ms +[2025-09-06 04:19:48] [Rank 0] step:1061/10000 train_time:71394ms step_avg:67.29ms +[2025-09-06 04:19:48] [Rank 0] step:1061/10000 train_time:71394ms step_avg:67.29ms +[2025-09-06 04:19:49] [Rank 0] step:1081/10000 train_time:72126ms step_avg:66.72ms +[2025-09-06 04:19:49] [Rank 0] step:1081/10000 train_time:72126ms step_avg:66.72ms +[2025-09-06 04:19:50] [Rank 0] step:1101/10000 train_time:72859ms step_avg:66.18ms +[2025-09-06 04:19:50] [Rank 0] step:1101/10000 train_time:72859ms step_avg:66.18ms +[2025-09-06 04:19:51] [Rank 0] step:1121/10000 train_time:73590ms step_avg:65.65ms +[2025-09-06 04:19:51] [Rank 0] step:1121/10000 train_time:73590ms step_avg:65.65ms +[2025-09-06 04:19:51] [Rank 0] step:1141/10000 train_time:74322ms step_avg:65.14ms +[2025-09-06 04:19:51] [Rank 0] step:1141/10000 train_time:74322ms step_avg:65.14ms +[2025-09-06 04:19:52] [Rank 0] step:1161/10000 train_time:75054ms step_avg:64.65ms +[2025-09-06 04:19:52] [Rank 0] step:1161/10000 train_time:75054ms step_avg:64.65ms +[2025-09-06 04:19:53] [Rank 0] step:1181/10000 train_time:75786ms step_avg:64.17ms +[2025-09-06 04:19:53] [Rank 0] step:1181/10000 train_time:75786ms step_avg:64.17ms +[2025-09-06 04:19:53] [Rank 0] step:1201/10000 train_time:76518ms step_avg:63.71ms +[2025-09-06 04:19:53] [Rank 0] step:1201/10000 train_time:76518ms step_avg:63.71ms +[2025-09-06 04:19:54] [Rank 0] step:1221/10000 train_time:77249ms step_avg:63.27ms +[2025-09-06 04:19:54] [Rank 0] step:1221/10000 train_time:77249ms step_avg:63.27ms +[2025-09-06 04:19:55] [Rank 0] step:1241/10000 train_time:77981ms step_avg:62.84ms +[2025-09-06 04:19:55] [Rank 0] step:1241/10000 train_time:77981ms step_avg:62.84ms +[2025-09-06 04:19:56] [Rank 0] step:1261/10000 train_time:78714ms step_avg:62.42ms +[2025-09-06 04:19:56] [Rank 0] step:1261/10000 train_time:78714ms step_avg:62.42ms +[2025-09-06 04:19:56] [Rank 0] step:1281/10000 train_time:79446ms step_avg:62.02ms +[2025-09-06 04:19:56] [Rank 0] step:1281/10000 train_time:79446ms step_avg:62.02ms +[2025-09-06 04:19:57] [Rank 0] step:1301/10000 train_time:80178ms step_avg:61.63ms +[2025-09-06 04:19:57] [Rank 0] step:1301/10000 train_time:80178ms step_avg:61.63ms +[2025-09-06 04:19:58] [Rank 0] step:1321/10000 train_time:80911ms step_avg:61.25ms +[2025-09-06 04:19:58] [Rank 0] step:1321/10000 train_time:80911ms step_avg:61.25ms +[2025-09-06 04:19:59] [Rank 0] step:1341/10000 train_time:81643ms step_avg:60.88ms +[2025-09-06 04:19:59] [Rank 0] step:1341/10000 train_time:81643ms step_avg:60.88ms +[2025-09-06 04:19:59] [Rank 0] step:1361/10000 train_time:82375ms step_avg:60.53ms +[2025-09-06 04:19:59] [Rank 0] step:1361/10000 train_time:82375ms step_avg:60.53ms +[2025-09-06 04:20:00] [Rank 0] step:1381/10000 train_time:83107ms step_avg:60.18ms +[2025-09-06 04:20:00] [Rank 0] step:1381/10000 train_time:83107ms step_avg:60.18ms +[2025-09-06 04:20:01] [Rank 0] step:1401/10000 train_time:83839ms step_avg:59.84ms +[2025-09-06 04:20:01] [Rank 0] step:1401/10000 train_time:83839ms step_avg:59.84ms +[2025-09-06 04:20:02] [Rank 0] step:1421/10000 train_time:84571ms step_avg:59.52ms +[2025-09-06 04:20:02] [Rank 0] step:1421/10000 train_time:84571ms step_avg:59.52ms +[2025-09-06 04:20:02] [Rank 0] step:1441/10000 train_time:85304ms step_avg:59.20ms +[2025-09-06 04:20:02] [Rank 0] step:1441/10000 train_time:85304ms step_avg:59.20ms +[2025-09-06 04:20:03] [Rank 0] step:1461/10000 train_time:86037ms 
step_avg:58.89ms +[2025-09-06 04:20:03] [Rank 0] step:1461/10000 train_time:86037ms step_avg:58.89ms +[2025-09-06 04:20:04] [Rank 0] step:1481/10000 train_time:86769ms step_avg:58.59ms +[2025-09-06 04:20:04] [Rank 0] step:1481/10000 train_time:86769ms step_avg:58.59ms +[2025-09-06 04:20:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:20:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 04:20:05] [Rank 0] PRINT: step:1500/10000 train_loss:3.8410 val_loss:3.6411 train_time:87581ms step_avg:58.39ms +[2025-09-06 04:20:05] [Rank 0] PRINT: step:1500/10000 train_loss:3.8410 val_loss:3.6411 train_time:87581ms step_avg:58.39ms +[2025-09-06 04:20:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 04:20:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 04:20:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 04:20:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 04:21:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 04:21:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 04:21:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 04:21:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 04:21:26] [Rank 0] Total Loss: 5.7976 +[2025-09-06 04:21:26] [Rank 0] Total Loss: 5.7976 +[2025-09-06 04:21:26] [Rank 0] Total FTA (Unweighted): 0.0938 +[2025-09-06 04:21:26] [Rank 0] Total FTA (Unweighted): 0.0938 +[2025-09-06 04:21:26] [Rank 0] Total FTA (Weighted): 0.0938 +[2025-09-06 04:21:26] [Rank 0] Total FTA (Weighted): 0.0938 +[2025-09-06 04:21:26] [Rank 0] Group 0 Loss: 3.5081 +[2025-09-06 04:21:26] [Rank 0] Group 0 Loss: 3.5081 +[2025-09-06 04:21:26] [Rank 0] Group 1 Loss: 3.7171 +[2025-09-06 04:21:26] [Rank 0] Group 1 Loss: 3.7171 +[2025-09-06 04:21:26] [Rank 0] Group 2 Loss: 4.1128 +[2025-09-06 04:21:26] [Rank 0] Group 2 Loss: 4.1128 +[2025-09-06 04:21:26] [Rank 0] Group 3 Loss: 4.9337 +[2025-09-06 04:21:26] [Rank 0] Group 3 Loss: 4.9337 +[2025-09-06 04:21:26] [Rank 0] Group 4 Loss: 5.8265 +[2025-09-06 04:21:26] [Rank 0] Group 4 Loss: 5.8265 +[2025-09-06 04:21:26] [Rank 0] Group 5 Loss: 6.1112 +[2025-09-06 04:21:26] [Rank 0] Group 5 Loss: 6.1112 +[2025-09-06 04:21:26] [Rank 0] Group 6 Loss: 6.2550 +[2025-09-06 04:21:26] [Rank 0] Group 6 Loss: 6.2550 +[2025-09-06 04:21:26] [Rank 0] Group 7 Loss: 6.2512 +[2025-09-06 04:21:26] [Rank 0] Group 7 Loss: 6.2512 +[2025-09-06 04:21:26] [Rank 0] Group 8 Loss: 6.4169 +[2025-09-06 04:21:26] [Rank 0] Group 8 Loss: 6.4169 +[2025-09-06 04:21:26] [Rank 0] Group 9 Loss: 6.5678 +[2025-09-06 04:21:26] [Rank 0] Group 9 Loss: 6.5678 +[2025-09-06 04:21:26] [Rank 0] Group 10 Loss: 6.5493 +[2025-09-06 04:21:26] [Rank 0] Group 10 Loss: 6.5493 +[2025-09-06 04:21:26] [Rank 0] Group 11 Loss: 6.6333 +[2025-09-06 04:21:26] [Rank 0] Group 11 Loss: 6.6333 +[2025-09-06 04:21:26] [Rank 0] Group 12 Loss: 6.4267 +[2025-09-06 04:21:26] [Rank 0] Group 12 Loss: 6.4267 +[2025-09-06 04:21:26] [Rank 0] Group 13 Loss: 6.4382 +[2025-09-06 04:21:26] [Rank 0] Group 13 Loss: 6.4382 +[2025-09-06 04:21:26] [Rank 0] Group 14 Loss: 6.5634 +[2025-09-06 04:21:26] [Rank 0] Group 14 Loss: 6.5634 +[2025-09-06 04:21:26] [Rank 0] Group 15 Loss: 6.4506 +[2025-09-06 04:21:26] [Rank 0] Group 15 Loss: 6.4506 +[2025-09-06 04:21:26] [Rank 0] Group 0 FTA: 0.0000 
+[2025-09-06 04:21:26] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 04:21:26] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:21:26] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-06 04:21:26] [Rank 0] Group 4 FTA: 0.0500
+[2025-09-06 04:21:26] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-06 04:21:26] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 04:21:26] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-06 04:21:26] [Rank 0] Group 8 FTA: 0.1200
+[2025-09-06 04:21:26] [Rank 0] Group 9 FTA: 0.0800
+[2025-09-06 04:21:26] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-06 04:21:26] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-06 04:21:26] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 04:21:26] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-06 04:21:26] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 04:21:26] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 04:21:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png
+[2025-09-06 04:21:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png
+[2025-09-06 04:21:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png
+[2025-09-06 04:21:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png
+[2025-09-06 04:21:27] [Rank 0] step:1501/10000 train_time:87591ms step_avg:58.35ms
+[2025-09-06 04:21:28] [Rank 0] step:1521/10000 train_time:88265ms step_avg:58.03ms
+[2025-09-06 04:21:29] [Rank 0] step:1541/10000 train_time:88996ms step_avg:57.75ms
+[2025-09-06 04:21:29] [Rank 0] step:1561/10000 train_time:89727ms step_avg:57.48ms
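A note on the two FTA totals above: the fixed eval set is 16 groups of per_group_k = 100 samples (1600 total), so the sample-weighted mean coincides with the plain group mean. A quick sketch reproducing the step-1500 totals from the per-group values, assuming the totals are simple means over groups and over samples respectively:

# Per-group first-token accuracies logged at step 1500 (groups 0..15).
group_fta = [0.00, 0.20, 0.18, 0.08, 0.05, 0.06, 0.06, 0.08,
             0.12, 0.08, 0.07, 0.10, 0.08, 0.11, 0.12, 0.11]

unweighted = sum(group_fta) / len(group_fta)  # plain mean over groups

# Sample-weighted mean; with 100 samples in every group it reduces to
# the unweighted mean, which is why the log prints 0.0938 twice.
counts = [100] * 16
weighted = sum(f * n for f, n in zip(group_fta, counts)) / sum(counts)

print(f"{unweighted:.4f} {weighted:.4f}")  # 0.0938 0.0938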
+[2025-09-06 04:21:30] [Rank 0] step:1581/10000 train_time:90460ms step_avg:57.22ms
+[2025-09-06 04:21:31] [Rank 0] step:1601/10000 train_time:91192ms step_avg:56.96ms
+[2025-09-06 04:21:32] [Rank 0] step:1621/10000 train_time:91926ms step_avg:56.71ms
+[2025-09-06 04:21:33] [Rank 0] step:1641/10000 train_time:93283ms step_avg:56.85ms
+[2025-09-06 04:21:34] [Rank 0] step:1661/10000 train_time:94015ms step_avg:56.60ms
+[2025-09-06 04:21:34] [Rank 0] step:1681/10000 train_time:94746ms step_avg:56.36ms
+[2025-09-06 04:21:35] [Rank 0] step:1701/10000 train_time:95478ms step_avg:56.13ms
+[2025-09-06 04:21:36] [Rank 0] step:1721/10000 train_time:96209ms step_avg:55.90ms
+[2025-09-06 04:21:37] [Rank 0] step:1741/10000 train_time:96941ms step_avg:55.68ms
+[2025-09-06 04:21:37] [Rank 0] step:1761/10000 train_time:97672ms step_avg:55.46ms
+[2025-09-06 04:21:38] [Rank 0] step:1781/10000 train_time:98404ms step_avg:55.25ms
+[2025-09-06 04:21:39] [Rank 0] step:1801/10000 train_time:99135ms step_avg:55.04ms
+[2025-09-06 04:21:39] [Rank 0] step:1821/10000 train_time:99867ms step_avg:54.84ms
+[2025-09-06 04:21:40] [Rank 0] step:1841/10000 train_time:100598ms step_avg:54.64ms
+[2025-09-06 04:21:41] [Rank 0] step:1861/10000 train_time:101329ms step_avg:54.45ms
+[2025-09-06 04:21:42] [Rank 0] step:1881/10000 train_time:102061ms step_avg:54.26ms
+[2025-09-06 04:21:42] [Rank 0] step:1901/10000 train_time:102792ms step_avg:54.07ms
+[2025-09-06 04:21:43] [Rank 0] step:1921/10000 train_time:103524ms step_avg:53.89ms
+[2025-09-06 04:21:44] [Rank 0] step:1941/10000 train_time:104256ms step_avg:53.71ms
+[2025-09-06 04:21:45] [Rank 0] step:1961/10000 train_time:104988ms step_avg:53.54ms
+[2025-09-06 04:21:45] [Rank 0] step:1981/10000 train_time:105719ms step_avg:53.37ms
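step_avg in these lines is cumulative wall-clock train_time divided by the step index, not a windowed average, which is why it keeps falling smoothly as the fixed startup cost is amortized. A small sanity check of that reading against the last entry above:

import re

line = "[2025-09-06 04:21:45] [Rank 0] step:1981/10000 train_time:105719ms step_avg:53.37ms"
m = re.search(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms", line)
step, train_time, logged_avg = int(m.group(1)), int(m.group(2)), float(m.group(3))

# Cumulative average: total wall time so far divided by steps completed.
assert abs(train_time / step - logged_avg) < 0.01  # 105719 / 1981 = 53.37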
+[2025-09-06 04:21:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:21:47] [Rank 0] PRINT: step:2000/10000 train_loss:3.5127 val_loss:3.3906 train_time:106530ms step_avg:53.27ms
+[2025-09-06 04:21:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:21:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:23:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:23:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:23:08] [Rank 0] Total Loss: 5.6589
+[2025-09-06 04:23:08] [Rank 0] Total FTA (Unweighted): 0.1250
+[2025-09-06 04:23:08] [Rank 0] Total FTA (Weighted): 0.1250
+[2025-09-06 04:23:08] [Rank 0] Group 0 Loss: 3.5842
+[2025-09-06 04:23:08] [Rank 0] Group 1 Loss: 3.5856
+[2025-09-06 04:23:08] [Rank 0] Group 2 Loss: 3.9148
+[2025-09-06 04:23:08] [Rank 0] Group 3 Loss: 4.6519
+[2025-09-06 04:23:08] [Rank 0] Group 4 Loss: 5.5621
+[2025-09-06 04:23:08] [Rank 0] Group 5 Loss: 5.8973
+[2025-09-06 04:23:08] [Rank 0] Group 6 Loss: 6.0925
+[2025-09-06 04:23:08] [Rank 0] Group 7 Loss: 6.1120
+[2025-09-06 04:23:08] [Rank 0] Group 8 Loss: 6.3205
+[2025-09-06 04:23:08] [Rank 0] Group 9 Loss: 6.4601
+[2025-09-06 04:23:08] [Rank 0] Group 10 Loss: 6.4433
+[2025-09-06 04:23:08] [Rank 0] Group 11 Loss: 6.4948
+[2025-09-06 04:23:08] [Rank 0] Group 12 Loss: 6.3103
+[2025-09-06 04:23:08] [Rank 0] Group 13 Loss: 6.3330
+[2025-09-06 04:23:08] [Rank 0] Group 14 Loss: 6.4380
+[2025-09-06 04:23:08] [Rank 0] Group 15 Loss: 6.3421
+[2025-09-06 04:23:08] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-06 04:23:08] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 04:23:08] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:23:08] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 04:23:08] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 04:23:08] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-06 04:23:08] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-06 04:23:08] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 04:23:08] [Rank 0] Group 8 FTA: 0.1200
+[2025-09-06 04:23:08] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-06 04:23:08] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-06 04:23:08] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 04:23:08] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 04:23:08] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 04:23:08] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 04:23:08] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 04:23:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png
+[2025-09-06 04:23:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png
+[2025-09-06 04:23:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png
+[2025-09-06 04:23:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png
+[2025-09-06 04:23:09] [Rank 0] step:2001/10000 train_time:106540ms step_avg:53.24ms
+[2025-09-06 04:23:10] [Rank 0] step:2021/10000 train_time:107406ms step_avg:53.15ms
+[2025-09-06 04:23:11] [Rank 0] step:2041/10000 train_time:108137ms step_avg:52.98ms
+[2025-09-06 04:23:12] [Rank 0] step:2061/10000 train_time:108869ms step_avg:52.82ms
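Comparing this step-2000 evaluation with the step-1500 one group by group shows where the extra 500 steps went. A sketch of that diff, with the loss values transcribed from the two result blocks above:

loss_1500 = [3.5081, 3.7171, 4.1128, 4.9337, 5.8265, 6.1112, 6.2550, 6.2512,
             6.4169, 6.5678, 6.5493, 6.6333, 6.4267, 6.4382, 6.5634, 6.4506]
loss_2000 = [3.5842, 3.5856, 3.9148, 4.6519, 5.5621, 5.8973, 6.0925, 6.1120,
             6.3205, 6.4601, 6.4433, 6.4948, 6.3103, 6.3330, 6.4380, 6.3421]

for g, (a, b) in enumerate(zip(loss_1500, loss_2000)):
    print(f"group {g:2d}: {a:.4f} -> {b:.4f} (delta {b - a:+.4f})")
# Every group except group 0 improves; the largest drops are in the
# mid-range groups 3-5, while group 0 regresses slightly (+0.0761).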
+[2025-09-06 04:23:12] [Rank 0] step:2081/10000 train_time:109601ms step_avg:52.67ms
+[2025-09-06 04:23:13] [Rank 0] step:2101/10000 train_time:110331ms step_avg:52.51ms
+[2025-09-06 04:23:14] [Rank 0] step:2121/10000 train_time:111062ms step_avg:52.36ms
+[2025-09-06 04:23:15] [Rank 0] step:2141/10000 train_time:111794ms step_avg:52.22ms
+[2025-09-06 04:23:15] [Rank 0] step:2161/10000 train_time:112526ms step_avg:52.07ms
+[2025-09-06 04:23:16] [Rank 0] step:2181/10000 train_time:113258ms step_avg:51.93ms
+[2025-09-06 04:23:17] [Rank 0] step:2201/10000 train_time:113989ms step_avg:51.79ms
+[2025-09-06 04:23:18] [Rank 0] step:2221/10000 train_time:114720ms step_avg:51.65ms
+[2025-09-06 04:23:18] [Rank 0] step:2241/10000 train_time:115457ms step_avg:51.52ms
+[2025-09-06 04:23:19] [Rank 0] step:2261/10000 train_time:116194ms step_avg:51.39ms
+[2025-09-06 04:23:20] [Rank 0] step:2281/10000 train_time:116932ms step_avg:51.26ms
+[2025-09-06 04:23:20] [Rank 0] step:2301/10000 train_time:117669ms step_avg:51.14ms
+[2025-09-06 04:23:21] [Rank 0] step:2321/10000 train_time:118406ms step_avg:51.02ms
+[2025-09-06 04:23:22] [Rank 0] step:2341/10000 train_time:119145ms step_avg:50.89ms
+[2025-09-06 04:23:23] [Rank 0] step:2361/10000 train_time:119883ms step_avg:50.78ms
+[2025-09-06 04:23:23] [Rank 0] step:2381/10000 train_time:120621ms step_avg:50.66ms
+[2025-09-06 04:23:24] [Rank 0] step:2401/10000 train_time:121359ms step_avg:50.55ms
+[2025-09-06 04:23:25] [Rank 0] step:2421/10000 train_time:122097ms step_avg:50.43ms
+[2025-09-06 04:23:26] [Rank 0] step:2441/10000 train_time:122982ms step_avg:50.38ms
+[2025-09-06 04:23:27] [Rank 0] step:2461/10000 train_time:123721ms step_avg:50.27ms
+[2025-09-06 04:23:27] [Rank 0] step:2481/10000 train_time:124458ms step_avg:50.16ms
+[2025-09-06 04:23:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:23:28] [Rank 0] PRINT: step:2500/10000 train_loss:3.3039 val_loss:3.2085 train_time:125276ms step_avg:50.11ms
+[2025-09-06 04:23:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:23:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:24:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:24:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:24:49] [Rank 0] Total Loss: 5.5795
+[2025-09-06 04:24:49] [Rank 0] Total FTA (Unweighted): 0.1238
+[2025-09-06 04:24:49] [Rank 0] Total FTA (Weighted): 0.1237
+[2025-09-06 04:24:49] [Rank 0] Group 0 Loss: 3.6129
+[2025-09-06 04:24:49] [Rank 0] Group 1 Loss: 3.5436
+[2025-09-06 04:24:49] [Rank 0] Group 2 Loss: 3.8318
+[2025-09-06 04:24:49] [Rank 0] Group 3 Loss: 4.5001
+[2025-09-06 04:24:49] [Rank 0] Group 4 Loss: 5.3850
+[2025-09-06 04:24:49] [Rank 0] Group 5 Loss: 5.7743
+[2025-09-06 04:24:49] [Rank 0] Group 6 Loss: 5.9821
+[2025-09-06 04:24:49] [Rank 0] Group 7 Loss: 6.0162
+[2025-09-06 04:24:49] [Rank 0] Group 8 Loss: 6.2473
+[2025-09-06 04:24:49] [Rank 0] Group 9 Loss: 6.3876
+[2025-09-06 04:24:49] [Rank 0] Group 10 Loss: 6.3879
+[2025-09-06 04:24:49] [Rank 0] Group 11 Loss: 6.4436
+[2025-09-06 04:24:49] [Rank 0] Group 12 Loss: 6.2568
+[2025-09-06 04:24:49] [Rank 0] Group 13 Loss: 6.2627
+[2025-09-06 04:24:49] [Rank 0] Group 14 Loss: 6.3698
+[2025-09-06 04:24:49] [Rank 0] Group 15 Loss: 6.2707
+[2025-09-06 04:24:49] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-06 04:24:49] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 04:24:49] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:24:49] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 04:24:49] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 04:24:49] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-06 04:24:49] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 04:24:49] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-06 04:24:49] [Rank 0] Group 8 FTA: 0.1300
+[2025-09-06 04:24:49] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 04:24:49] [Rank 0] Group 10 FTA: 0.0900
+[2025-09-06 04:24:49] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 04:24:49] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 04:24:49] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 04:24:49] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 04:24:49] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 04:24:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png
+[2025-09-06 04:24:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png
+[2025-09-06 04:24:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png
+[2025-09-06 04:24:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png
+[2025-09-06 04:24:51] [Rank 0] step:2501/10000 train_time:125285ms step_avg:50.09ms
+[2025-09-06 04:24:52] [Rank 0] step:2521/10000 train_time:125949ms step_avg:49.96ms
+[2025-09-06 04:24:52] [Rank 0] step:2541/10000 train_time:126687ms step_avg:49.86ms
+[2025-09-06 04:24:53] [Rank 0] step:2561/10000 train_time:127424ms step_avg:49.76ms
+[2025-09-06 04:24:54] [Rank 0] step:2581/10000 train_time:128162ms step_avg:49.66ms
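The config knobs recorded for these runs (num_iterations = 10000, cooldown_frac = 0.8) are the ones modded-nanogpt derivatives usually feed into a constant-then-linear-cooldown learning-rate multiplier. The schedule function itself is not part of this excerpt, so treat the following purely as an assumption about what those two knobs typically drive:

# Assumed modded-nanogpt-style schedule; NOT shown in this log excerpt.
def get_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = step / num_iterations
    if x < 1 - cooldown_frac:       # first 20% of training: constant LR
        return 1.0
    w = (1 - x) / cooldown_frac     # remaining 80%: linear cooldown
    return w * 1.0 + (1 - w) * 0.1  # decays toward 10% of the base LR

print(get_lr_multiplier(2500), get_lr_multiplier(10000))  # 0.94375 0.1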
+[2025-09-06 04:24:55] [Rank 0] step:2601/10000 train_time:128900ms step_avg:49.56ms
+[2025-09-06 04:24:55] [Rank 0] step:2621/10000 train_time:129637ms step_avg:49.46ms
+[2025-09-06 04:24:56] [Rank 0] step:2641/10000 train_time:130375ms step_avg:49.37ms
+[2025-09-06 04:24:57] [Rank 0] step:2661/10000 train_time:131114ms step_avg:49.27ms
+[2025-09-06 04:24:58] [Rank 0] step:2681/10000 train_time:131851ms step_avg:49.18ms
+[2025-09-06 04:24:58] [Rank 0] step:2701/10000 train_time:132589ms step_avg:49.09ms
+[2025-09-06 04:24:59] [Rank 0] step:2721/10000 train_time:133326ms step_avg:49.00ms
+[2025-09-06 04:25:00] [Rank 0] step:2741/10000 train_time:134065ms step_avg:48.91ms
+[2025-09-06 04:25:00] [Rank 0] step:2761/10000 train_time:134803ms step_avg:48.82ms
+[2025-09-06 04:25:01] [Rank 0] step:2781/10000 train_time:135541ms step_avg:48.74ms
+[2025-09-06 04:25:02] [Rank 0] step:2801/10000 train_time:136280ms step_avg:48.65ms
+[2025-09-06 04:25:03] [Rank 0] step:2821/10000 train_time:137646ms step_avg:48.79ms
+[2025-09-06 04:25:04] [Rank 0] step:2841/10000 train_time:138384ms step_avg:48.71ms
+[2025-09-06 04:25:05] [Rank 0] step:2861/10000 train_time:139123ms step_avg:48.63ms
+[2025-09-06 04:25:06] [Rank 0] step:2881/10000 train_time:139862ms step_avg:48.55ms
+[2025-09-06 04:25:06] [Rank 0] step:2901/10000 train_time:140600ms step_avg:48.47ms
+[2025-09-06 04:25:07] [Rank 0] step:2921/10000 train_time:141337ms step_avg:48.39ms
+[2025-09-06 04:25:08] [Rank 0] step:2941/10000 train_time:142075ms step_avg:48.31ms
+[2025-09-06 04:25:08] [Rank 0] step:2961/10000 train_time:142813ms step_avg:48.23ms
+[2025-09-06 04:25:09] [Rank 0] step:2981/10000 train_time:143557ms step_avg:48.16ms
+[2025-09-06 04:25:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 04:25:10] [Rank 0] PRINT: step:3000/10000 train_loss:3.1430 val_loss:3.0684 train_time:144375ms step_avg:48.13ms
+[2025-09-06 04:25:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:25:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 04:26:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 04:26:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 04:26:31] [Rank 0] Total Loss: 5.4465
+[2025-09-06 04:26:31] [Rank 0] Total FTA (Unweighted): 0.1381
+[2025-09-06 04:26:31] [Rank 0] Total FTA (Weighted): 0.1381
+[2025-09-06 04:26:31] [Rank 0] Group 0 Loss: 3.5434
+[2025-09-06 04:26:31] [Rank 0] Group 1 Loss: 3.5148
+[2025-09-06 04:26:31] [Rank 0] Group 2 Loss: 3.7153
+[2025-09-06 04:26:31] [Rank 0] Group 3 Loss: 4.2670
+[2025-09-06 04:26:31] [Rank 0] Group 4 Loss: 5.1801
+[2025-09-06 04:26:32] [Rank 0] Group 5 Loss: 5.5759
+[2025-09-06 04:26:32] [Rank 0] Group 6 Loss: 5.8276
+[2025-09-06 04:26:32] [Rank 0] Group 7 Loss: 5.8817
+[2025-09-06 04:26:32] [Rank 0] Group 8 Loss: 6.1138
+[2025-09-06 04:26:32] [Rank 0] Group 9 Loss: 6.2780
+[2025-09-06 04:26:32] [Rank 0] Group 10 Loss: 6.2590
+[2025-09-06 04:26:32] [Rank 0] Group 11 Loss: 6.3056
+[2025-09-06 04:26:32] [Rank 0] Group 12 Loss: 6.1537
+[2025-09-06 04:26:32] [Rank 0] Group 13 Loss: 6.1320
+[2025-09-06 04:26:32] [Rank 0] Group 14 Loss: 6.2374
+[2025-09-06 04:26:32] [Rank 0] Group 15 Loss: 6.1582
+[2025-09-06 04:26:32] [Rank 0] Group 0 FTA: 0.4000
+[2025-09-06 04:26:32] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 04:26:32] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 04:26:32] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 04:26:32] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 04:26:32] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 04:26:32] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-06 04:26:32] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 04:26:32] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-06 04:26:32] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 04:26:32] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-06 04:26:32] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 04:26:32] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 04:26:32] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 04:26:32] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 04:26:32] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 04:26:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_loss_curves.png
+[2025-09-06 04:26:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/per_class_acc_curves.png
+[2025-09-06 04:26:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_loss_curve.png
+[2025-09-06 04:26:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.05_seed_45/total_acc_curve.png
+[2025-09-06 04:26:33] [Rank 0] step:3001/10000 train_time:144385ms step_avg:48.11ms
+[2025-09-06 04:26:34] [Rank 0] step:3021/10000 train_time:145068ms step_avg:48.02ms
+[2025-09-06 04:26:35] [Rank 0] step:3041/10000 train_time:145805ms step_avg:47.95ms
+[2025-09-06 04:26:35] [Rank 0] step:3061/10000 train_time:146682ms step_avg:47.92ms
+[2025-09-06 04:26:36] [Rank 0] step:3081/10000 train_time:147420ms step_avg:47.85ms
+[2025-09-06 04:26:37] [Rank 0] step:3101/10000 train_time:148157ms step_avg:47.78ms
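The "[✓] ... curve updated" messages point at matplotlib PNGs, but the underlying series can be scraped straight back out of the log text. A sketch of that (the file name is hypothetical; the guard collapses the logger's habit of emitting every line twice):

import re
from collections import defaultdict

per_group = defaultdict(list)  # group id -> loss at each detailed eval
prev = None
with open("training_log.txt") as f:  # hypothetical local copy of this log
    for line in f:
        if line == prev:             # skip the duplicate copy of each line
            continue
        prev = line
        m = re.search(r"Group (\d+) Loss: ([\d.]+)", line)
        if m:
            per_group[int(m.group(1))].append(float(m.group(2)))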
+[2025-09-06 04:26:38] [Rank 0] step:3121/10000 train_time:148894ms step_avg:47.71ms
+[2025-09-06 04:26:38] [Rank 0] step:3141/10000 train_time:149783ms step_avg:47.69ms
+[2025-09-06 04:26:39] [Rank 0] step:3161/10000 train_time:150522ms step_avg:47.62ms
+[2025-09-06 04:26:40] [Rank 0] step:3181/10000 train_time:151260ms step_avg:47.55ms
+[2025-09-06 04:26:41] [Rank 0] step:3201/10000 train_time:151997ms step_avg:47.48ms
+[2025-09-06 04:26:41] [Rank 0] step:3221/10000 train_time:152734ms step_avg:47.42ms
+[2025-09-06 04:26:42] [Rank 0] step:3241/10000 train_time:153470ms step_avg:47.35ms
+[2025-09-06 04:26:43] [Rank 0] step:3261/10000 train_time:154207ms step_avg:47.29ms
+[2025-09-06 04:26:44] [Rank 0] step:3281/10000 train_time:154945ms step_avg:47.22ms
+[2025-09-06 04:26:44] [Rank 0] step:3301/10000 train_time:155682ms step_avg:47.16ms
+[2025-09-06 04:26:45] [Rank 0] step:3321/10000 train_time:156420ms step_avg:47.10ms
+[2025-09-06 04:26:46] [Rank 0] step:3341/10000 train_time:157157ms step_avg:47.04ms
+[2025-09-06 04:26:47] [Rank 0] step:3361/10000 train_time:157895ms step_avg:46.98ms
+[2025-09-06 04:26:47] [Rank 0] step:3381/10000 train_time:158632ms step_avg:46.92ms
+[2025-09-06 04:26:48] [Rank 0] step:3401/10000 train_time:159370ms step_avg:46.86ms
+[2025-09-06 04:26:49] [Rank 0] step:3421/10000 train_time:160107ms step_avg:46.80ms
+[2025-09-06 04:26:50] [Rank 0] step:3441/10000 train_time:160845ms step_avg:46.74ms
+[2025-09-06 04:26:50] [Rank 0] step:3461/10000 train_time:161583ms step_avg:46.69ms
+[2025-09-06 04:26:51] [Rank 0] step:3481/10000 train_time:162320ms step_avg:46.63ms
+[2025-09-06 04:26:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
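The recurring warning is benign but quantifiable: 491520 validation tokens do not divide evenly into 65536-token batches, so the tail of the set is skipped at every eval. The arithmetic:

val_tokens, val_batch_size = 491520, 65536
full_batches = val_tokens // val_batch_size          # 7  (491520 / 65536 = 7.5)
missed = val_tokens - full_batches * val_batch_size  # 32768 tokens skipped per eval
print(full_batches, missed)  # 7 32768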
+[2025-09-06 04:26:52] [Rank 0] PRINT: step:3500/10000 train_loss:3.0222 val_loss:2.9634 train_time:163139ms step_avg:46.61ms
+[2025-09-06 04:26:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 04:26:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..a8e11bf1106bbdd8939a5bdb2b5890fc3a04bf66
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/config.json
@@ -0,0 +1,29 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 9,
+        "model_parameterization": "gated",
+        "per_group_k": 100,
+        "muon_lr": 0.01,
+        "adam_lr": 0.001,
+        "base_dir": "logs_qa_sgd_gated/lr_search_long",
+        "sgd_lr": 0.08,
+        "m_val": 15,
+        "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl"
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin",
+        "val_tokens": 491520,
+        "train_seq_len": 3072,
+        "val_seq_len": 16384,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "a5d24c3d-ecf9-48db-b7f3-7b81d729325f",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/fixed_eval_indices.json
new file mode 100644
index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/fixed_eval_indices.json
@@ -0,0 +1 @@
+{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260,
577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 
1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 
670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 
1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..22f7bdb94dd6276264bb9c6e34f314cbc4a12d59 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c519eacd3c28ab098057d3152287a2d8df2c18c2e747d602c6e5d5f4ddba05 +size 314499 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..cd85fa7285ddc77871704740d9536def9cebc8df --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31e486d34e017b5a76bbd16ba8c551ca989e07b1cf04f273b7a1388c7fea0af6 +size 409580 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..13d11f63f9333a411b9f111cd4a7af2d59860af9 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68789366e530b4e21c202b970456181ea2d4567d34fe5f16166a2a936b79854b +size 91718 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png 
b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..1975bbc78c2fdf30f2e7cad719a9d778ce5a060b
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cde3d1a259be4c3066f695d1ea138ac8ae69f0a2e1a961b3e93b16637cadd1f6
+size 117060
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/training_log_a5d24c3d-ecf9-48db-b7f3-7b81d729325f.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/training_log_a5d24c3d-ecf9-48db-b7f3-7b81d729325f.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d22d0a0150a46d0f5f150c1669c6e5c549a82514
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/training_log_a5d24c3d-ecf9-48db-b7f3-7b81d729325f.txt
@@ -0,0 +1,5614 @@
+[2025-09-05 23:27:00] [Rank 0] PRINT: --- Script Start: Fri Sep 5 23:27:00 2025 ---
+[2025-09-05 23:27:00] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.08, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 23:27:00] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 23:27:00] [Rank 0] PRINT: Using fixed seed: 42
+[2025-09-05 23:27:00] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42
utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
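for selecting the CUDA device of this process.
+# Hedged launch sketch (assumption: the script is started via torchrun, which
+# sets the RANK / LOCAL_RANK / WORLD_SIZE environment variables read below):
+#   torchrun --nproc_per_node=8 <this_script>.py --optimizer_mode 9 \
+#            --model_parameterization gated --sgd_lr 0.08 --seed 42
+# Use local_rank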
for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write each message to the logfile exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
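(config saved, script code echoed)
+# A sample of the log format print0 writes, taken from this run:
+#   [2025-09-05 23:27:00] [Rank 0] PRINT: Using fixed seed: 42
+# Messages prefixed "PRINT:" are also echoed to stdout on the master process.
+# ...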
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
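Evaluation loop (below).
+    # Hedged worked example for generate_powerlaw_selection_counts above, with m=3:
+    #   group 0: 1 class  x 2**3 = 8 samples      (class 0)
+    #   group 1: 1 class  x 2**2 = 4 samples      (class 1)
+    #   group 2: 2 classes x 2**1 = 2 samples each (classes 2-3)
+    #   group 3: 4 classes x 2**0 = 1 sample each  (classes 4-7)
+    # i.e. selection_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+    # and class_groups == [0, 1, 2, 2, 3, 3, 3, 3]; m=15 yields 2**15 = 32768 classes.
+    # 3.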
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
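Aggregate results (below).
+    # Hedged example of why the two totals computed below can differ: with
+    # group_correct = {0: 95, 5: 5} and group_total_fta = {0: 100, 5: 10},
+    # the sample-weighted total is (95+5)/(100+10) ~= 0.909, while the
+    # unweighted per-group mean is (0.95 + 0.50)/2 = 0.725.
+    # 4.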
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
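", console=True)
+        # Recap of the Muon/Adam split per mode in this branch (restates the prints above):
+        #   0: Muon=QKVO+MLP   1: Muon=QK       2: Muon=VO       3: Muon=QKVO   4: Muon=MLP
+        #   5: all-Adam        6: Muon=W_2      7: Muon=VO+MLP   8: Muon=VO+W_2  9: pure SGD
+        #  10: Muon=W_O+MLP   13: Muon=W_O+W_2 14: Muon=W_O     15: Muon=W_V   16: Muon=QKV
+        # Everything not assigned to Muon (plus embeds, lm_head, scalars) goes to Adam.
+        print0(f"PRINT: Optimizers configured. 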
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
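model_compiled.parameters():
+        pass  # no-op pass over parameters; the gradient all-reduce loop follows
+    # Hedged worked example of get_lr above (num_iterations=10000, cooldown_frac=0.8):
+    #   step  1000: x=0.1 < 0.2            -> multiplier 1.0 (stable phase)
+    #   step  6000: x=0.6, w=(1-0.6)/0.8=0.5 -> 0.5*1.0 + 0.5*0.1 = 0.55
+    #   step 10000: x=1.0, w=0.0           -> multiplier 0.1 (end of cooldown)
+    for param in 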
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
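", console=True)
+        # Hedged sketch of the JSON written above: group id (as a string) maps to
+        # at most PER_GROUP_K line indices into the QA jsonl, e.g.
+        #   {"0": [12, 345, 6789, ...], "1": [...], ..., "15": [...]}
+        print0(f"PRINT: Built fixed eval set. 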
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
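", console=True)
+            # Hedged arithmetic behind the divisibility warning above: with
+            # val_tokens=491520, val_seq_len=16384 and an assumed world_size=8,
+            # val_batch_size = 8*16384 = 131072 and 491520/131072 = 3.75, so
+            # only 3 full validation steps run; the remainder is skipped.
+                print0(f"PRINT: Warning: No validation steps were completed. 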
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
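console=True)
+                # Hedged note: the dict saved above bundles step, the logged source
+                # code, and model/optimizer state_dicts, so a run can in principle
+                # be resumed or audited from ckpt_epoch_{step}.pt alone.
+                torch.save(log_checkpoint, str(checkpoint_path)) if False else None  # no-op; save already done above
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 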
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 23:27:00] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
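+# Editor's sketch of the shard layout _load_data_shard expects (derived from
+# the asserts above): a 256-int32 header followed by uint16 tokens, where
+# header[0] == 20240520 (magic), header[1] == 1 (version) and header[2] is the
+# token count. A compatible shard could be written like this (illustrative):
+#   >>> import numpy as np
+#   >>> header = np.zeros(256, dtype=np.int32)
+#   >>> header[:3] = (20240520, 1, len(tokens))
+#   >>> with open("train_000.bin", "wb") as f:
+#   ...     f.write(header.tobytes())
+#   ...     f.write(np.asarray(tokens, dtype=np.uint16).tobytes())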
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
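+ # Editor's note on the two totals computed below: the sample-weighted total
+ # favors the large head groups, while the unweighted total treats every
+ # group equally. E.g. if group 0 scores 90/100 and group 5 scores 1/10, then
+ #   weighted   = (90 + 1) / (100 + 10) ~= 0.83
+ #   unweighted = (0.90 + 0.10) / 2      = 0.50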
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
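+ # Summary of the Muon / Adam split per mode in this ladder (editor's aid;
+ # embeddings, the lm_head and scalar parameters always go to Adam):
+ #   mode  Muon                Adam matrices
+ #   0     QKVO + MLP          -
+ #   1     QK                  VO + MLP
+ #   2     VO                  QK + MLP
+ #   3     QKVO                MLP
+ #   4     MLP                 QKVO
+ #   5     -                   QKVO + MLP
+ #   6     W_2                 QKVO + W_1
+ #   7     VO + MLP            QK
+ #   8     VO + W_2            QK + W_1
+ #   9     (pure SGD + momentum on everything)
+ #   10    O + MLP             V + QK
+ #   13    O + W_2             QK + V + W_1
+ #   14    O                   QK + V + MLP
+ #   15    V                   QK + O + MLP
+ #   16    QKV                 O + MLP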
elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
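+ # Editor's note: in this "gated" branch W_1 spans both c_fc and c_up while
+ # W_2 is c_proj alone, so e.g. modes 6/8 give Muon only c_proj. A gated MLP
+ # of this shape typically computes something like (an assumption about
+ # models.nano_GPT_gated, not verified here):
+ #   >>> h = self.c_proj(F.silu(self.c_fc(x)) * self.c_up(x))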
elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; use exp_args.muon_lr directly (the local muon_lr alias is only defined in the qkvo branch) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
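+ # Editor's note on get_lr above: with num_iterations=10000 and
+ # cooldown_frac=0.8, the multiplier stays at 1.0 for the first 20% of
+ # training and then decays linearly toward 0.1, e.g. approximately
+ #   >>> get_lr(1000), get_lr(6000), get_lr(10000)
+ #   (1.0, 0.55, 0.1)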
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
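+ # Editor's note: build_fixed_eval_indices returns {group_id: [jsonl line
+ # numbers]} with at most PER_GROUP_K entries per group, drawn once with a
+ # dedicated seeded RNG so every later evaluation scores the same samples,
+ # e.g. (values hypothetical):
+ #   >>> build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, 2)
+ #   {'0': [12, 9034], '1': [77, 15210], ...}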
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
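+ # Editor's note on the validation arithmetic above: with val_tokens=491520
+ # and val_seq_len=16384, a single-GPU run gives val_batch_size=16384 and
+ # val_num_steps = 491520 // 16384 = 30; with world_size=8 the batch grows
+ # to 131072, 491520 is no longer evenly divisible, and the warning fires.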
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / step if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # use the simple (unweighted) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
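+        # "FTA" above is first-token accuracy. A minimal sketch of how one sample
+        # can be scored inside run_detailed_evaluation (defined earlier), reusing
+        # the same question/answer regex as the fixed-eval filter;
+        # greedy_decode_one_token is a hypothetical stand-in for a forward pass
+        # plus argmax over the next-token logits:
+        #
+        #     m = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE)
+        #     question, answer = m.group(1), m.group(2).strip()
+        #     prompt_ids = tokenizer_for_eval.encode(question + " Answer:")
+        #     first_gold_id = tokenizer_for_eval.encode(" " + answer)[0]
+        #     hit = greedy_decode_one_token(model_for_inference, prompt_ids) == first_gold_id
+        #
+        # Per-group FTA is then the mean hit rate over that group's fixed samples.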
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for the SGD mode (mode 9) to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Momentum warmup: ramp linearly from 0.85 to 0.95 over the first 300 steps.
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Computed per-token loss (currently not included in the log line below).
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 23:27:00] [Rank 0] PRINT: Constructing model...
+[2025-09-05 23:27:01] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 23:27:01] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 23:27:01] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 23:27:06] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 23:27:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 23:27:06] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 23:27:06] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 23:27:06] [Rank 0] PRINT: Model returns:
+[2025-09-05 23:27:06] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 23:27:06] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 23:27:06] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.08).
+[2025-09-05 23:27:06] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 23:27:06] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 23:27:10] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 23:27:10] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 23:27:48] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 23:27:48] [Rank 0] PRINT: Starting training...
+[2025-09-05 23:27:54] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/fixed_eval_indices.json
+[2025-09-05 23:27:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:27:58] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 23:28:30] [Rank 0] step:21/10000 train_time:31720ms step_avg:1510.47ms
+[2025-09-05 23:28:30] [Rank 0] step:41/10000 train_time:32446ms step_avg:791.36ms
+[2025-09-05 23:28:31] [Rank 0] step:61/10000 train_time:33170ms step_avg:543.77ms
+[2025-09-05 23:28:32] [Rank 0] step:81/10000 train_time:33895ms step_avg:418.46ms
+[2025-09-05 23:28:32] [Rank 0] step:101/10000 train_time:34619ms step_avg:342.76ms
+[2025-09-05 23:28:33] [Rank 0] step:121/10000 train_time:35344ms step_avg:292.10ms
+[2025-09-05 23:28:34] [Rank 0] step:141/10000 train_time:36069ms step_avg:255.81ms
+[2025-09-05 23:28:35] [Rank 0] step:161/10000 train_time:36795ms step_avg:228.54ms
+[2025-09-05 23:28:35] [Rank 0] step:181/10000 train_time:37519ms step_avg:207.29ms
+[2025-09-05 23:28:36] [Rank 0] step:201/10000 train_time:38245ms step_avg:190.27ms
+[2025-09-05 23:28:37] [Rank 0] step:221/10000 train_time:38970ms step_avg:176.33ms
+[2025-09-05 23:28:38] [Rank 0] step:241/10000 train_time:39695ms step_avg:164.71ms
+[2025-09-05 23:28:38] [Rank 0] step:261/10000 train_time:40419ms step_avg:154.86ms
+[2025-09-05 23:28:39] [Rank 0] step:281/10000 train_time:41144ms step_avg:146.42ms
+[2025-09-05 23:28:40] [Rank 0] step:301/10000 train_time:41871ms step_avg:139.10ms
+[2025-09-05 23:28:40] [Rank 0] step:321/10000 train_time:42595ms step_avg:132.70ms
+[2025-09-05 23:28:41] [Rank 0] step:341/10000 train_time:43320ms step_avg:127.04ms
+[2025-09-05 23:28:42] [Rank 0] step:361/10000 train_time:44045ms step_avg:122.01ms
+[2025-09-05 23:28:43] [Rank 0] step:381/10000 train_time:44770ms step_avg:117.51ms
+[2025-09-05 23:28:43] [Rank 0] step:401/10000 train_time:45495ms step_avg:113.45ms
+[2025-09-05 23:28:44] [Rank 0] step:421/10000 train_time:46220ms step_avg:109.79ms
+[2025-09-05 23:28:45] [Rank 0] step:441/10000 train_time:46944ms step_avg:106.45ms
+[2025-09-05 23:28:46] [Rank 0] step:461/10000 train_time:47669ms step_avg:103.40ms
+[2025-09-05 23:28:46] [Rank 0] step:481/10000 train_time:48395ms step_avg:100.61ms
+[2025-09-05 23:28:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:28:47] [Rank 0] PRINT: step:500/10000 train_loss:5.9566 val_loss:4.2945 train_time:49199ms step_avg:98.40ms
+[2025-09-05 23:28:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:28:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:30:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:30:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:30:08] [Rank 0] Total Loss: 6.0845
+[2025-09-05 23:30:08] [Rank 0] Total FTA (Unweighted): 0.0806
+[2025-09-05 23:30:08] [Rank 0] Total FTA (Weighted): 0.0806
+[2025-09-05 23:30:08] [Rank 0] Group 0 Loss: 3.8431
+[2025-09-05 23:30:08] [Rank 0] Group 1 Loss: 3.9774
+[2025-09-05 23:30:08] [Rank 0] Group 2 Loss: 4.8441
+[2025-09-05 23:30:08] [Rank 0] Group 3 Loss: 5.6274
+[2025-09-05 23:30:08] [Rank 0] Group 4 Loss: 6.3295
+[2025-09-05 23:30:08] [Rank 0] Group 5 Loss: 6.4303
+[2025-09-05 23:30:08] [Rank 0] Group 6 Loss: 6.4870
+[2025-09-05 23:30:08] [Rank 0] Group 7 Loss: 6.4858
+[2025-09-05 23:30:08] [Rank 0] Group 8 Loss: 6.6048
+[2025-09-05 23:30:08] [Rank 0] Group 9 Loss: 6.7346
+[2025-09-05 23:30:08] [Rank 0] Group 10 Loss: 6.7065
+[2025-09-05 23:30:08] [Rank 0] Group 11 Loss: 6.7686
+[2025-09-05 23:30:08] [Rank 0] Group 12 Loss: 6.6087
+[2025-09-05 23:30:08] [Rank 0] Group 13 Loss: 6.5900
+[2025-09-05 23:30:08] [Rank 0] Group 14 Loss: 6.7056
+[2025-09-05 23:30:08] [Rank 0] Group 15 Loss: 6.6081
+[2025-09-05 23:30:08] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 23:30:08] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 23:30:08] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-05 23:30:08] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-05 23:30:08] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-05 23:30:08] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-05 23:30:08] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 23:30:08] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-05 23:30:08] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-05 23:30:08] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-05 23:30:08] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-05 23:30:08] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 23:30:08] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 23:30:08] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 23:30:08] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 23:30:08] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 23:30:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-05 23:30:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-05 23:30:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-05 23:30:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-05 23:30:10] [Rank 0] step:501/10000 train_time:49208ms step_avg:98.22ms
+[2025-09-05 23:30:11] [Rank 0] step:521/10000 train_time:49873ms step_avg:95.73ms
+[2025-09-05 23:30:12] [Rank 0] step:541/10000 train_time:50598ms step_avg:93.53ms
+[2025-09-05 23:30:12] [Rank 0] step:561/10000 train_time:51322ms step_avg:91.48ms
+[2025-09-05 23:30:13] [Rank 0] step:581/10000 train_time:52046ms step_avg:89.58ms
+[2025-09-05 23:30:14] [Rank 0] step:601/10000 train_time:52771ms step_avg:87.81ms
+[2025-09-05 23:30:14] [Rank 0] step:621/10000 train_time:53496ms step_avg:86.15ms
+[2025-09-05 23:30:15] [Rank 0] step:641/10000 train_time:54221ms step_avg:84.59ms
+[2025-09-05 23:30:16] [Rank 0] step:661/10000 train_time:54946ms step_avg:83.13ms
+[2025-09-05 23:30:17] [Rank 0] step:681/10000 train_time:55671ms step_avg:81.75ms
+[2025-09-05 23:30:17] [Rank 0] step:701/10000 train_time:56396ms step_avg:80.45ms
+[2025-09-05 23:30:18] [Rank 0] step:721/10000 train_time:57121ms step_avg:79.22ms
+[2025-09-05 23:30:19] [Rank 0] step:741/10000 train_time:57846ms step_avg:78.06ms
+[2025-09-05 23:30:19] [Rank 0] step:761/10000 train_time:58575ms step_avg:76.97ms
+[2025-09-05 23:30:20] [Rank 0] step:781/10000 train_time:59304ms step_avg:75.93ms
+[2025-09-05 23:30:21] [Rank 0] step:801/10000 train_time:60035ms step_avg:74.95ms
+[2025-09-05 23:30:22] [Rank 0] step:821/10000 train_time:61375ms step_avg:74.76ms
+[2025-09-05 23:30:23] [Rank 0] step:841/10000 train_time:62105ms step_avg:73.85ms
+[2025-09-05 23:30:24] [Rank 0] step:861/10000 train_time:62835ms step_avg:72.98ms
+[2025-09-05 23:30:25] [Rank 0] step:881/10000 train_time:63681ms step_avg:72.28ms
+[2025-09-05 23:30:25] [Rank 0] step:901/10000 train_time:64410ms step_avg:71.49ms
+[2025-09-05 23:30:26] [Rank 0] step:921/10000 train_time:65141ms step_avg:70.73ms
+[2025-09-05 23:30:27] [Rank 0] step:941/10000 train_time:65871ms step_avg:70.00ms
+[2025-09-05 23:30:28] [Rank 0] step:961/10000 train_time:66754ms step_avg:69.46ms
+[2025-09-05 23:30:28] [Rank 0] step:981/10000 train_time:67484ms step_avg:68.79ms
+[2025-09-05 23:30:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:30:30] [Rank 0] PRINT: step:1000/10000 train_loss:3.8611 val_loss:3.5342 train_time:68293ms step_avg:68.29ms
+[2025-09-05 23:30:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:30:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:31:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:31:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:31:50] [Rank 0] Total Loss: 5.6120
+[2025-09-05 23:31:50] [Rank 0] Total FTA (Unweighted): 0.1156
+[2025-09-05 23:31:50] [Rank 0] Total FTA (Weighted): 0.1156
+[2025-09-05 23:31:50] [Rank 0] Group 0 Loss: 3.4600
+[2025-09-05 23:31:50] [Rank 0] Group 1 Loss: 3.4309
+[2025-09-05 23:31:50] [Rank 0] Group 2 Loss: 3.8632
+[2025-09-05 23:31:50] [Rank 0] Group 3 Loss: 4.6820
+[2025-09-05 23:31:50] [Rank 0] Group 4 Loss: 5.6174
+[2025-09-05 23:31:50] [Rank 0] Group 5 Loss: 5.8651
+[2025-09-05 23:31:50] [Rank 0] Group 6 Loss: 6.0548
+[2025-09-05 23:31:50] [Rank 0] Group 7 Loss: 6.0911
+[2025-09-05 23:31:50] [Rank 0] Group 8 Loss: 6.2580
+[2025-09-05 23:31:50] [Rank 0] Group 9 Loss: 6.4160
+[2025-09-05 23:31:50] [Rank 0] Group 10 Loss: 6.3485
+[2025-09-05 23:31:50] [Rank 0] Group 11 Loss: 6.4465
+[2025-09-05 23:31:50] [Rank 0] Group 12 Loss: 6.2971
+[2025-09-05 23:31:50] [Rank 0] Group 13 Loss: 6.3030
+[2025-09-05 23:31:50] [Rank 0] Group 14 Loss: 6.3691
+[2025-09-05 23:31:50] [Rank 0] Group 15 Loss: 6.2893
+[2025-09-05 23:31:50] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 23:31:50] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 23:31:50] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 23:31:50] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 23:31:50] [Rank 0] Group 4 FTA: 0.0500
+[2025-09-05 23:31:50] [Rank 0] Group 5 FTA: 0.0900
+[2025-09-05 23:31:50] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-05 23:31:50] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-05 23:31:50] [Rank 0] Group 8 FTA: 0.1200
+[2025-09-05 23:31:50] [Rank 0] Group 9 FTA: 0.0900
+[2025-09-05 23:31:50] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-05 23:31:50] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 23:31:50] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 23:31:50] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 23:31:50] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 23:31:50] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 23:31:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-05 23:31:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-05 23:31:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-05 23:31:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-05 23:31:52] [Rank 0] step:1001/10000 train_time:68302ms step_avg:68.23ms
+[2025-09-05 23:31:53] [Rank 0] step:1021/10000 train_time:68974ms step_avg:67.56ms
+[2025-09-05 23:31:54] [Rank 0] step:1041/10000 train_time:69705ms step_avg:66.96ms
+[2025-09-05 23:31:54] [Rank 0] step:1061/10000 train_time:70435ms step_avg:66.39ms
+[2025-09-05 23:31:55] [Rank 0] step:1081/10000 train_time:71165ms step_avg:65.83ms
+[2025-09-05 23:31:56] [Rank 0] step:1101/10000 train_time:71895ms step_avg:65.30ms
+[2025-09-05 23:31:57] [Rank 0] step:1121/10000 train_time:72625ms step_avg:64.79ms
+[2025-09-05 23:31:57] [Rank 0] step:1141/10000 train_time:73355ms step_avg:64.29ms
+[2025-09-05 23:31:58] [Rank 0] step:1161/10000 train_time:74085ms step_avg:63.81ms
+[2025-09-05 23:31:59] [Rank 0] step:1181/10000 train_time:74814ms step_avg:63.35ms
+[2025-09-05 23:31:59] [Rank 0] step:1201/10000 train_time:75544ms step_avg:62.90ms
+[2025-09-05 23:32:00] [Rank 0] step:1221/10000 train_time:76274ms step_avg:62.47ms
+[2025-09-05 23:32:01] [Rank 0] step:1241/10000 train_time:77004ms step_avg:62.05ms
+[2025-09-05 23:32:02] [Rank 0] step:1261/10000 train_time:77734ms step_avg:61.64ms
+[2025-09-05 23:32:02] [Rank 0] step:1281/10000 train_time:78465ms step_avg:61.25ms
+[2025-09-05 23:32:03] [Rank 0] step:1301/10000 train_time:79194ms step_avg:60.87ms
+[2025-09-05 23:32:04] [Rank 0] step:1321/10000 train_time:79925ms step_avg:60.50ms
+[2025-09-05 23:32:05] [Rank 0] step:1341/10000 train_time:80654ms step_avg:60.14ms
+[2025-09-05 23:32:05] [Rank 0] step:1361/10000 train_time:81385ms step_avg:59.80ms
+[2025-09-05 23:32:06] [Rank 0] step:1381/10000 train_time:82115ms step_avg:59.46ms
+[2025-09-05 23:32:07] [Rank 0] step:1401/10000 train_time:82844ms step_avg:59.13ms
+[2025-09-05 23:32:07] [Rank 0] step:1421/10000 train_time:83574ms step_avg:58.81ms
+[2025-09-05 23:32:08] [Rank 0] step:1441/10000 train_time:84304ms step_avg:58.50ms
+[2025-09-05 23:32:09] [Rank 0] step:1461/10000 train_time:85033ms step_avg:58.20ms
+[2025-09-05 23:32:10] [Rank 0] step:1481/10000 train_time:85763ms step_avg:57.91ms
+[2025-09-05 23:32:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:32:11] [Rank 0] PRINT: step:1500/10000 train_loss:3.3295 val_loss:3.1523 train_time:86573ms step_avg:57.72ms
+[2025-09-05 23:32:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:32:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:33:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:33:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:33:32] [Rank 0] Total Loss: 5.3043
+[2025-09-05 23:33:32] [Rank 0] Total FTA (Unweighted): 0.1375
+[2025-09-05 23:33:32] [Rank 0] Total FTA (Weighted): 0.1375
+[2025-09-05 23:33:32] [Rank 0] Group 0 Loss: 3.3229
+[2025-09-05 23:33:32] [Rank 0] Group 1 Loss: 3.3621
+[2025-09-05 23:33:32] [Rank 0] Group 2 Loss: 3.5050
+[2025-09-05 23:33:32] [Rank 0] Group 3 Loss: 4.1571
+[2025-09-05 23:33:32] [Rank 0] Group 4 Loss: 5.0872
+[2025-09-05 23:33:32] [Rank 0] Group 5 Loss: 5.4439
+[2025-09-05 23:33:32] [Rank 0] Group 6 Loss: 5.6866
+[2025-09-05 23:33:32] [Rank 0] Group 7 Loss: 5.7604
+[2025-09-05 23:33:32] [Rank 0] Group 8 Loss: 5.9688
+[2025-09-05 23:33:32] [Rank 0] Group 9 Loss: 6.1188
+[2025-09-05 23:33:32] [Rank 0] Group 10 Loss: 6.0953
+[2025-09-05 23:33:32] [Rank 0] Group 11 Loss: 6.1689
+[2025-09-05 23:33:32] [Rank 0] Group 12 Loss: 6.0313
+[2025-09-05 23:33:32] [Rank 0] Group 13 Loss: 6.0238
+[2025-09-05 23:33:32] [Rank 0] Group 14 Loss: 6.1036
+[2025-09-05 23:33:32] [Rank 0] Group 15 Loss: 6.0338
+[2025-09-05 23:33:32] [Rank 0] Group 0 FTA: 0.4000
+[2025-09-05 23:33:32] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 23:33:32] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 23:33:32] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 23:33:32] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 23:33:32] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 23:33:32] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-05 23:33:32] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 23:33:32] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-05 23:33:32] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 23:33:32] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-05 23:33:32] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 23:33:32] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 23:33:32] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 23:33:32] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 23:33:32] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 23:33:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-05 23:33:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-05 23:33:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-05 23:33:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-05 23:33:33] [Rank 0] step:1501/10000 train_time:86582ms step_avg:57.68ms
+[2025-09-05 23:33:34] [Rank 0] step:1521/10000 train_time:87254ms step_avg:57.37ms
+[2025-09-05 23:33:35] [Rank 0] step:1541/10000 train_time:88129ms step_avg:57.19ms
+[2025-09-05 23:33:36] [Rank 0] step:1561/10000 train_time:88859ms step_avg:56.92ms
+[2025-09-05 23:33:36] [Rank 0] step:1581/10000 train_time:89589ms step_avg:56.67ms
+[2025-09-05 23:33:37] [Rank 0] step:1601/10000 train_time:90534ms step_avg:56.55ms
+[2025-09-05 23:33:38] [Rank 0] step:1621/10000 train_time:91264ms step_avg:56.30ms
+[2025-09-05 23:33:39] [Rank 0] step:1641/10000 train_time:92616ms step_avg:56.44ms
+[2025-09-05 23:33:40] [Rank 0] step:1661/10000 train_time:93346ms step_avg:56.20ms
+[2025-09-05 23:33:41] [Rank 0] step:1681/10000 train_time:94075ms step_avg:55.96ms
+[2025-09-05 23:33:42] [Rank 0] step:1701/10000 train_time:94805ms step_avg:55.73ms
+[2025-09-05 23:33:42] [Rank 0] step:1721/10000 train_time:95534ms step_avg:55.51ms
+[2025-09-05 23:33:43] [Rank 0] step:1741/10000 train_time:96264ms step_avg:55.29ms
+[2025-09-05 23:33:44] [Rank 0] step:1761/10000 train_time:96994ms step_avg:55.08ms
+[2025-09-05 23:33:44] [Rank 0] step:1781/10000 train_time:97723ms step_avg:54.87ms
+[2025-09-05 23:33:45] [Rank 0] step:1801/10000 train_time:98453ms step_avg:54.67ms
+[2025-09-05 23:33:46] [Rank 0] step:1821/10000 train_time:99183ms step_avg:54.47ms
+[2025-09-05 23:33:47] [Rank 0] step:1841/10000 train_time:99913ms step_avg:54.27ms
+[2025-09-05 23:33:47] [Rank 0] step:1861/10000 train_time:100643ms step_avg:54.08ms
+[2025-09-05 23:33:48] [Rank 0] step:1881/10000 train_time:101373ms step_avg:53.89ms
+[2025-09-05 23:33:49] [Rank 0] step:1901/10000 train_time:102102ms step_avg:53.71ms
+[2025-09-05 23:33:50] [Rank 0] step:1921/10000 train_time:102833ms step_avg:53.53ms
+[2025-09-05 23:33:50] [Rank 0] step:1941/10000 train_time:103562ms step_avg:53.35ms
+[2025-09-05 23:33:51] [Rank 0] step:1961/10000 train_time:104292ms step_avg:53.18ms
+[2025-09-05 23:33:52] [Rank 0] step:1981/10000 train_time:105022ms step_avg:53.01ms
+[2025-09-05 23:33:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:33:53] [Rank 0] PRINT: step:2000/10000 train_loss:3.0483 val_loss:2.9247 train_time:105832ms step_avg:52.92ms
+[2025-09-05 23:33:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:33:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:35:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:35:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:35:14] [Rank 0] Total Loss: 5.2694
+[2025-09-05 23:35:14] [Rank 0] Total FTA (Unweighted): 0.1781
+[2025-09-05 23:35:14] [Rank 0] Total FTA (Weighted): 0.1781
+[2025-09-05 23:35:14] [Rank 0] Group 0 Loss: 3.3797
+[2025-09-05 23:35:14] [Rank 0] Group 1 Loss: 3.4319
+[2025-09-05 23:35:14] [Rank 0] Group 2 Loss: 3.4945
+[2025-09-05 23:35:14] [Rank 0] Group 3 Loss: 4.0667
+[2025-09-05 23:35:14] [Rank 0] Group 4 Loss: 4.9414
+[2025-09-05 23:35:14] [Rank 0] Group 5 Loss: 5.3394
+[2025-09-05 23:35:14] [Rank 0] Group 6 Loss: 5.6164
+[2025-09-05 23:35:14] [Rank 0] Group 7 Loss: 5.7003
+[2025-09-05 23:35:14] [Rank 0] Group 8 Loss: 5.9261
+[2025-09-05 23:35:14] [Rank 0] Group 9 Loss: 6.0531
+[2025-09-05 23:35:14] [Rank 0] Group 10 Loss: 6.0844
+[2025-09-05 23:35:14] [Rank 0] Group 11 Loss: 6.1546
+[2025-09-05 23:35:14] [Rank 0] Group 12 Loss: 6.0049
+[2025-09-05 23:35:14] [Rank 0] Group 13 Loss: 6.0194
+[2025-09-05 23:35:14] [Rank 0] Group 14 Loss: 6.0841
+[2025-09-05 23:35:14] [Rank 0] Group 15 Loss: 6.0141
+[2025-09-05 23:35:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:35:14] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 23:35:14] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 23:35:14] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 23:35:14] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 23:35:14] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 23:35:14] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 23:35:14] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 23:35:14] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 23:35:14] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 23:35:14] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 23:35:14] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 23:35:14] [Rank 0] Group 12 FTA: 0.0700
+[2025-09-05 23:35:14] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 23:35:14] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 23:35:14] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 23:35:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-05 23:35:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-05 23:35:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-05 23:35:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-05 23:35:16] [Rank 0] step:2001/10000 train_time:105842ms step_avg:52.89ms
+[2025-09-05 23:35:17] [Rank 0] step:2021/10000 train_time:106710ms step_avg:52.80ms
+[2025-09-05 23:35:17] [Rank 0] step:2041/10000 train_time:107440ms step_avg:52.64ms
+[2025-09-05 23:35:18] [Rank 0] step:2061/10000 train_time:108170ms step_avg:52.48ms
+[2025-09-05 23:35:19] [Rank 0] step:2081/10000 train_time:108900ms step_avg:52.33ms
+[2025-09-05 23:35:19] [Rank 0] step:2101/10000 train_time:109630ms step_avg:52.18ms
+[2025-09-05 23:35:20] [Rank 0] step:2121/10000 train_time:110360ms step_avg:52.03ms
+[2025-09-05 23:35:21] [Rank 0] step:2141/10000 train_time:111090ms step_avg:51.89ms
+[2025-09-05 23:35:22] [Rank 0] step:2161/10000 train_time:111819ms step_avg:51.74ms
+[2025-09-05 23:35:22] [Rank 0] step:2181/10000 train_time:112549ms step_avg:51.60ms
+[2025-09-05 23:35:23] [Rank 0] step:2201/10000 train_time:113279ms step_avg:51.47ms
+[2025-09-05 23:35:24] [Rank 0] step:2221/10000 train_time:114009ms step_avg:51.33ms
+[2025-09-05 23:35:25] [Rank 0] step:2241/10000 train_time:114743ms step_avg:51.20ms
+[2025-09-05 23:35:25] [Rank 0] step:2261/10000 train_time:115480ms step_avg:51.07ms
+[2025-09-05 23:35:26] [Rank 0] step:2281/10000 train_time:116216ms step_avg:50.95ms
+[2025-09-05 23:35:27] [Rank 0] step:2301/10000 train_time:116952ms step_avg:50.83ms
+[2025-09-05 23:35:27] [Rank 0] step:2321/10000 train_time:117688ms step_avg:50.71ms
+[2025-09-05 23:35:28] [Rank 0] step:2341/10000 train_time:118424ms step_avg:50.59ms
+[2025-09-05 23:35:29] [Rank 0] step:2361/10000 train_time:119160ms step_avg:50.47ms
+[2025-09-05 23:35:30] [Rank 0] step:2381/10000 train_time:119896ms step_avg:50.36ms
+[2025-09-05 23:35:30] [Rank 0] step:2401/10000 train_time:120631ms step_avg:50.24ms
+[2025-09-05 23:35:31] [Rank 0] step:2421/10000 train_time:121367ms step_avg:50.13ms
+[2025-09-05 23:35:32] [Rank 0] step:2441/10000 train_time:122102ms step_avg:50.02ms
+[2025-09-05 23:35:33] [Rank 0] step:2461/10000 train_time:122837ms step_avg:49.91ms
+[2025-09-05 23:35:33] [Rank 0] step:2481/10000 train_time:123574ms step_avg:49.81ms
+[2025-09-05 23:35:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:35:35] [Rank 0] PRINT: step:2500/10000 train_loss:2.8422 val_loss:2.7536 train_time:124390ms step_avg:49.76ms
+[2025-09-05 23:35:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:35:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:36:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:36:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:36:55] [Rank 0] Total Loss: 5.0406
+[2025-09-05 23:36:55] [Rank 0] Total FTA (Unweighted): 0.1825
+[2025-09-05 23:36:55] [Rank 0] Total FTA (Weighted): 0.1825
+[2025-09-05 23:36:55] [Rank 0] Group 0 Loss: 3.2590
+[2025-09-05 23:36:55] [Rank 0] Group 1 Loss: 3.2005
+[2025-09-05 23:36:55] [Rank 0] Group 2 Loss: 3.3797
+[2025-09-05 23:36:55] [Rank 0] Group 3 Loss: 3.8635
+[2025-09-05 23:36:55] [Rank 0] Group 4 Loss: 4.6166
+[2025-09-05 23:36:55] [Rank 0] Group 5 Loss: 5.0455
+[2025-09-05 23:36:55] [Rank 0] Group 6 Loss: 5.3290
+[2025-09-05 23:36:55] [Rank 0] Group 7 Loss: 5.4563
+[2025-09-05 23:36:55] [Rank 0] Group 8 Loss: 5.6995
+[2025-09-05 23:36:55] [Rank 0] Group 9 Loss: 5.8198
+[2025-09-05 23:36:55] [Rank 0] Group 10 Loss: 5.8166
+[2025-09-05 23:36:55] [Rank 0] Group 11 Loss: 5.8893
+[2025-09-05 23:36:55] [Rank 0] Group 12 Loss: 5.8009
+[2025-09-05 23:36:55] [Rank 0] Group 13 Loss: 5.7838
+[2025-09-05 23:36:55] [Rank 0] Group 14 Loss: 5.8715
+[2025-09-05 23:36:55] [Rank 0] Group 15 Loss: 5.8184
+[2025-09-05 23:36:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:36:56] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 23:36:56] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 23:36:56] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 23:36:56] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-05 23:36:56] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 23:36:56] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-05 23:36:56] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 23:36:56] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-05 23:36:56] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 23:36:56] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 23:36:56] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 23:36:56] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 23:36:56] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 23:36:56] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 23:36:56] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 23:36:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-05 23:36:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-05 23:36:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-05 23:36:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-05 23:36:57] [Rank 0] step:2501/10000 train_time:124400ms step_avg:49.74ms
+[2025-09-05 23:36:58] [Rank 0] step:2521/10000 train_time:125082ms step_avg:49.62ms
+[2025-09-05 23:36:59] [Rank 0] step:2541/10000 train_time:125817ms step_avg:49.51ms
+[2025-09-05 23:36:59] [Rank 0] step:2561/10000 train_time:126553ms step_avg:49.42ms
+[2025-09-05 23:37:00] [Rank 0] step:2581/10000 train_time:127290ms step_avg:49.32ms
+[2025-09-05 23:37:01] [Rank 0] step:2601/10000 train_time:128025ms step_avg:49.22ms
+[2025-09-05 23:37:01] [Rank 0] step:2621/10000 train_time:128762ms step_avg:49.13ms
+[2025-09-05 23:37:02] [Rank 0] step:2641/10000 train_time:129498ms step_avg:49.03ms
+[2025-09-05 23:37:03] [Rank 0] step:2661/10000 train_time:130235ms step_avg:48.94ms
+[2025-09-05 23:37:04] [Rank 0] step:2681/10000 train_time:130971ms step_avg:48.85ms
+[2025-09-05 23:37:04] [Rank 0] step:2701/10000 train_time:131707ms step_avg:48.76ms
+[2025-09-05 23:37:05] [Rank 0] step:2721/10000 train_time:132443ms step_avg:48.67ms
+[2025-09-05 23:37:06] [Rank 0] step:2741/10000 train_time:133179ms step_avg:48.59ms
+[2025-09-05 23:37:07] [Rank 0] step:2761/10000 train_time:133914ms step_avg:48.50ms
+[2025-09-05 23:37:07] [Rank 0] step:2781/10000 train_time:134650ms step_avg:48.42ms
+[2025-09-05 23:37:08] [Rank 0] step:2801/10000 train_time:135385ms step_avg:48.33ms
+[2025-09-05 23:37:09] [Rank 0] step:2821/10000 train_time:136724ms step_avg:48.47ms
+[2025-09-05 23:37:10] [Rank 0] step:2841/10000 train_time:137461ms step_avg:48.38ms
+[2025-09-05 23:37:11] [Rank 0] step:2861/10000 train_time:138197ms step_avg:48.30ms
+[2025-09-05 23:37:12] [Rank 0] step:2881/10000 train_time:138934ms step_avg:48.22ms
+[2025-09-05 23:37:12] [Rank 0] step:2901/10000 train_time:139670ms step_avg:48.15ms
+[2025-09-05 23:37:13] [Rank 0] step:2921/10000 train_time:140406ms step_avg:48.07ms
+[2025-09-05 23:37:14] [Rank 0] step:2941/10000 train_time:141142ms step_avg:47.99ms
+[2025-09-05 23:37:15] [Rank 0] step:2961/10000 train_time:141878ms step_avg:47.92ms
+[2025-09-05 23:37:15] [Rank 0] step:2981/10000 train_time:142613ms step_avg:47.84ms
+[2025-09-05 23:37:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
Some tokens might be missed. +[2025-09-05 23:37:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:37:16] [Rank 0] PRINT: step:3000/10000 train_loss:2.6889 val_loss:2.6208 train_time:143431ms step_avg:47.81ms +[2025-09-05 23:37:16] [Rank 0] PRINT: step:3000/10000 train_loss:2.6889 val_loss:2.6208 train_time:143431ms step_avg:47.81ms +[2025-09-05 23:37:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:37:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:37:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:37:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:38:37] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:38:37] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:38:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:38:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:38:38] [Rank 0] Total Loss: 4.9647 +[2025-09-05 23:38:38] [Rank 0] Total Loss: 4.9647 +[2025-09-05 23:38:38] [Rank 0] Total FTA (Unweighted): 0.1906 +[2025-09-05 23:38:38] [Rank 0] Total FTA (Unweighted): 0.1906 +[2025-09-05 23:38:38] [Rank 0] Total FTA (Weighted): 0.1906 +[2025-09-05 23:38:38] [Rank 0] Total FTA (Weighted): 0.1906 +[2025-09-05 23:38:38] [Rank 0] Group 0 Loss: 3.2909 +[2025-09-05 23:38:38] [Rank 0] Group 0 Loss: 3.2909 +[2025-09-05 23:38:38] [Rank 0] Group 1 Loss: 3.2935 +[2025-09-05 23:38:38] [Rank 0] Group 1 Loss: 3.2935 +[2025-09-05 23:38:38] [Rank 0] Group 2 Loss: 3.3133 +[2025-09-05 23:38:38] [Rank 0] Group 2 Loss: 3.3133 +[2025-09-05 23:38:38] [Rank 0] Group 3 Loss: 3.7989 +[2025-09-05 23:38:38] [Rank 0] Group 3 Loss: 3.7989 +[2025-09-05 23:38:38] [Rank 0] Group 4 Loss: 4.4973 +[2025-09-05 23:38:38] [Rank 0] Group 4 Loss: 4.4973 +[2025-09-05 23:38:38] [Rank 0] Group 5 Loss: 4.9144 +[2025-09-05 23:38:38] [Rank 0] Group 5 Loss: 4.9144 +[2025-09-05 23:38:38] [Rank 0] Group 6 Loss: 5.2051 +[2025-09-05 23:38:38] [Rank 0] Group 6 Loss: 5.2051 +[2025-09-05 23:38:38] [Rank 0] Group 7 Loss: 5.3261 +[2025-09-05 23:38:38] [Rank 0] Group 7 Loss: 5.3261 +[2025-09-05 23:38:38] [Rank 0] Group 8 Loss: 5.5862 +[2025-09-05 23:38:38] [Rank 0] Group 8 Loss: 5.5862 +[2025-09-05 23:38:38] [Rank 0] Group 9 Loss: 5.7330 +[2025-09-05 23:38:38] [Rank 0] Group 9 Loss: 5.7330 +[2025-09-05 23:38:38] [Rank 0] Group 10 Loss: 5.7329 +[2025-09-05 23:38:38] [Rank 0] Group 10 Loss: 5.7329 +[2025-09-05 23:38:38] [Rank 0] Group 11 Loss: 5.8015 +[2025-09-05 23:38:38] [Rank 0] Group 11 Loss: 5.8015 +[2025-09-05 23:38:38] [Rank 0] Group 12 Loss: 5.7199 +[2025-09-05 23:38:38] [Rank 0] Group 12 Loss: 5.7199 +[2025-09-05 23:38:38] [Rank 0] Group 13 Loss: 5.7211 +[2025-09-05 23:38:38] [Rank 0] Group 13 Loss: 5.7211 +[2025-09-05 23:38:38] [Rank 0] Group 14 Loss: 5.7903 +[2025-09-05 23:38:38] [Rank 0] Group 14 Loss: 5.7903 +[2025-09-05 23:38:38] [Rank 0] Group 15 Loss: 5.7111 +[2025-09-05 23:38:38] [Rank 0] Group 15 Loss: 5.7111 +[2025-09-05 23:38:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:38:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:38:38] [Rank 0] Group 1 FTA: 0.3400 +[2025-09-05 23:38:38] [Rank 0] Group 1 FTA: 0.3400 +[2025-09-05 23:38:38] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 23:38:38] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 23:38:38] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 23:38:38] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 23:38:38] 
[Rank 0] Group 4 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 23:38:38] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 23:38:38] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:38:38] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:38:38] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 23:38:38] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 23:38:38] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-05 23:38:38] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-05 23:38:38] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 23:38:38] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 23:38:38] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 23:38:38] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 23:38:38] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:38:38] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:38:38] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 23:38:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:38:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:38:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:38:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:38:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:38:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:38:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:38:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:38:39] [Rank 0] step:3001/10000 train_time:143440ms step_avg:47.80ms +[2025-09-05 23:38:39] [Rank 0] step:3001/10000 train_time:143440ms step_avg:47.80ms +[2025-09-05 23:38:40] [Rank 0] step:3021/10000 train_time:144106ms step_avg:47.70ms +[2025-09-05 23:38:40] [Rank 0] step:3021/10000 train_time:144106ms step_avg:47.70ms +[2025-09-05 23:38:41] [Rank 0] step:3041/10000 train_time:144842ms step_avg:47.63ms +[2025-09-05 23:38:41] [Rank 0] step:3041/10000 train_time:144842ms step_avg:47.63ms +[2025-09-05 23:38:41] [Rank 0] step:3061/10000 train_time:145578ms step_avg:47.56ms +[2025-09-05 23:38:41] [Rank 0] step:3061/10000 train_time:145578ms step_avg:47.56ms +[2025-09-05 23:38:42] [Rank 0] step:3081/10000 train_time:146313ms step_avg:47.49ms +[2025-09-05 23:38:42] [Rank 0] step:3081/10000 train_time:146313ms step_avg:47.49ms +[2025-09-05 23:38:43] [Rank 0] step:3101/10000 train_time:147049ms step_avg:47.42ms 
+[2025-09-05 23:38:43] [Rank 0] step:3101/10000 train_time:147049ms step_avg:47.42ms +[2025-09-05 23:38:43] [Rank 0] step:3121/10000 train_time:147785ms step_avg:47.35ms +[2025-09-05 23:38:43] [Rank 0] step:3121/10000 train_time:147785ms step_avg:47.35ms +[2025-09-05 23:38:44] [Rank 0] step:3141/10000 train_time:148520ms step_avg:47.28ms +[2025-09-05 23:38:44] [Rank 0] step:3141/10000 train_time:148520ms step_avg:47.28ms +[2025-09-05 23:38:45] [Rank 0] step:3161/10000 train_time:149256ms step_avg:47.22ms +[2025-09-05 23:38:45] [Rank 0] step:3161/10000 train_time:149256ms step_avg:47.22ms +[2025-09-05 23:38:46] [Rank 0] step:3181/10000 train_time:149992ms step_avg:47.15ms +[2025-09-05 23:38:46] [Rank 0] step:3181/10000 train_time:149992ms step_avg:47.15ms +[2025-09-05 23:38:46] [Rank 0] step:3201/10000 train_time:150728ms step_avg:47.09ms +[2025-09-05 23:38:46] [Rank 0] step:3201/10000 train_time:150728ms step_avg:47.09ms +[2025-09-05 23:38:47] [Rank 0] step:3221/10000 train_time:151463ms step_avg:47.02ms +[2025-09-05 23:38:47] [Rank 0] step:3221/10000 train_time:151463ms step_avg:47.02ms +[2025-09-05 23:38:48] [Rank 0] step:3241/10000 train_time:152200ms step_avg:46.96ms +[2025-09-05 23:38:48] [Rank 0] step:3241/10000 train_time:152200ms step_avg:46.96ms +[2025-09-05 23:38:49] [Rank 0] step:3261/10000 train_time:152936ms step_avg:46.90ms +[2025-09-05 23:38:49] [Rank 0] step:3261/10000 train_time:152936ms step_avg:46.90ms +[2025-09-05 23:38:49] [Rank 0] step:3281/10000 train_time:153672ms step_avg:46.84ms +[2025-09-05 23:38:49] [Rank 0] step:3281/10000 train_time:153672ms step_avg:46.84ms +[2025-09-05 23:38:50] [Rank 0] step:3301/10000 train_time:154517ms step_avg:46.81ms +[2025-09-05 23:38:50] [Rank 0] step:3301/10000 train_time:154517ms step_avg:46.81ms +[2025-09-05 23:38:51] [Rank 0] step:3321/10000 train_time:155263ms step_avg:46.75ms +[2025-09-05 23:38:51] [Rank 0] step:3321/10000 train_time:155263ms step_avg:46.75ms +[2025-09-05 23:38:52] [Rank 0] step:3341/10000 train_time:155999ms step_avg:46.69ms +[2025-09-05 23:38:52] [Rank 0] step:3341/10000 train_time:155999ms step_avg:46.69ms +[2025-09-05 23:38:52] [Rank 0] step:3361/10000 train_time:156734ms step_avg:46.63ms +[2025-09-05 23:38:52] [Rank 0] step:3361/10000 train_time:156734ms step_avg:46.63ms +[2025-09-05 23:38:53] [Rank 0] step:3381/10000 train_time:157593ms step_avg:46.61ms +[2025-09-05 23:38:53] [Rank 0] step:3381/10000 train_time:157593ms step_avg:46.61ms +[2025-09-05 23:38:54] [Rank 0] step:3401/10000 train_time:158329ms step_avg:46.55ms +[2025-09-05 23:38:54] [Rank 0] step:3401/10000 train_time:158329ms step_avg:46.55ms +[2025-09-05 23:38:55] [Rank 0] step:3421/10000 train_time:159065ms step_avg:46.50ms +[2025-09-05 23:38:55] [Rank 0] step:3421/10000 train_time:159065ms step_avg:46.50ms +[2025-09-05 23:38:55] [Rank 0] step:3441/10000 train_time:159801ms step_avg:46.44ms +[2025-09-05 23:38:55] [Rank 0] step:3441/10000 train_time:159801ms step_avg:46.44ms +[2025-09-05 23:38:56] [Rank 0] step:3461/10000 train_time:160538ms step_avg:46.38ms +[2025-09-05 23:38:56] [Rank 0] step:3461/10000 train_time:160538ms step_avg:46.38ms +[2025-09-05 23:38:57] [Rank 0] step:3481/10000 train_time:161273ms step_avg:46.33ms +[2025-09-05 23:38:57] [Rank 0] step:3481/10000 train_time:161273ms step_avg:46.33ms +[2025-09-05 23:38:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:38:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:38:58] [Rank 0] PRINT: step:3500/10000 train_loss:2.5800 val_loss:2.5235 train_time:162090ms step_avg:46.31ms +[2025-09-05 23:38:58] [Rank 0] PRINT: step:3500/10000 train_loss:2.5800 val_loss:2.5235 train_time:162090ms step_avg:46.31ms +[2025-09-05 23:38:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:38:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:38:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:38:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:40:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:40:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:40:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:40:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:40:19] [Rank 0] Total Loss: 4.9191 +[2025-09-05 23:40:19] [Rank 0] Total Loss: 4.9191 +[2025-09-05 23:40:19] [Rank 0] Total FTA (Unweighted): 0.2287 +[2025-09-05 23:40:19] [Rank 0] Total FTA (Unweighted): 0.2287 +[2025-09-05 23:40:19] [Rank 0] Total FTA (Weighted): 0.2288 +[2025-09-05 23:40:19] [Rank 0] Total FTA (Weighted): 0.2288 +[2025-09-05 23:40:19] [Rank 0] Group 0 Loss: 3.2493 +[2025-09-05 23:40:19] [Rank 0] Group 0 Loss: 3.2493 +[2025-09-05 23:40:19] [Rank 0] Group 1 Loss: 3.2622 +[2025-09-05 23:40:19] [Rank 0] Group 1 Loss: 3.2622 +[2025-09-05 23:40:19] [Rank 0] Group 2 Loss: 3.3695 +[2025-09-05 23:40:19] [Rank 0] Group 2 Loss: 3.3695 +[2025-09-05 23:40:19] [Rank 0] Group 3 Loss: 3.7911 +[2025-09-05 23:40:19] [Rank 0] Group 3 Loss: 3.7911 +[2025-09-05 23:40:19] [Rank 0] Group 4 Loss: 4.3838 +[2025-09-05 23:40:19] [Rank 0] Group 4 Loss: 4.3838 +[2025-09-05 23:40:19] [Rank 0] Group 5 Loss: 4.8313 +[2025-09-05 23:40:19] [Rank 0] Group 5 Loss: 4.8313 +[2025-09-05 23:40:19] [Rank 0] Group 6 Loss: 5.1342 +[2025-09-05 23:40:19] [Rank 0] Group 6 Loss: 5.1342 +[2025-09-05 23:40:19] [Rank 0] Group 7 Loss: 5.2604 +[2025-09-05 23:40:19] [Rank 0] Group 7 Loss: 5.2604 +[2025-09-05 23:40:19] [Rank 0] Group 8 Loss: 5.5492 +[2025-09-05 23:40:19] [Rank 0] Group 8 Loss: 5.5492 +[2025-09-05 23:40:19] [Rank 0] Group 9 Loss: 5.6755 +[2025-09-05 23:40:19] [Rank 0] Group 9 Loss: 5.6755 +[2025-09-05 23:40:19] [Rank 0] Group 10 Loss: 5.6849 +[2025-09-05 23:40:19] [Rank 0] Group 10 Loss: 5.6849 +[2025-09-05 23:40:19] [Rank 0] Group 11 Loss: 5.7908 +[2025-09-05 23:40:19] [Rank 0] Group 11 Loss: 5.7908 +[2025-09-05 23:40:19] [Rank 0] Group 12 Loss: 5.6505 +[2025-09-05 23:40:19] [Rank 0] Group 12 Loss: 5.6505 +[2025-09-05 23:40:19] [Rank 0] Group 13 Loss: 5.6852 +[2025-09-05 23:40:19] [Rank 0] Group 13 Loss: 5.6852 +[2025-09-05 23:40:19] [Rank 0] Group 14 Loss: 5.7242 +[2025-09-05 23:40:19] [Rank 0] Group 14 Loss: 5.7242 +[2025-09-05 23:40:19] [Rank 0] Group 15 Loss: 5.6630 +[2025-09-05 23:40:19] [Rank 0] Group 15 Loss: 5.6630 +[2025-09-05 23:40:19] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:40:19] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:40:19] [Rank 0] Group 1 FTA: 0.8300 +[2025-09-05 23:40:19] [Rank 0] Group 1 FTA: 0.8300 +[2025-09-05 23:40:19] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 23:40:19] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 23:40:19] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:40:19] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:40:19] [Rank 0] Group 4 FTA: 0.0900 
+[2025-09-05 23:40:19] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-05 23:40:19] [Rank 0] Group 5 FTA: 0.2000 +[2025-09-05 23:40:19] [Rank 0] Group 5 FTA: 0.2000 +[2025-09-05 23:40:19] [Rank 0] Group 6 FTA: 0.1300 +[2025-09-05 23:40:19] [Rank 0] Group 6 FTA: 0.1300 +[2025-09-05 23:40:19] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 23:40:19] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 23:40:19] [Rank 0] Group 8 FTA: 0.2000 +[2025-09-05 23:40:19] [Rank 0] Group 8 FTA: 0.2000 +[2025-09-05 23:40:19] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 23:40:19] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 23:40:19] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-05 23:40:19] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-05 23:40:19] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 23:40:19] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 23:40:19] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 23:40:19] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 23:40:19] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 23:40:19] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 23:40:19] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:40:19] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:40:19] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 23:40:19] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 23:40:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:40:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:40:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:40:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:40:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:40:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:40:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:40:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:40:20] [Rank 0] step:3501/10000 train_time:162099ms step_avg:46.30ms +[2025-09-05 23:40:20] [Rank 0] step:3501/10000 train_time:162099ms step_avg:46.30ms +[2025-09-05 23:40:21] [Rank 0] step:3521/10000 train_time:162766ms step_avg:46.23ms +[2025-09-05 23:40:21] [Rank 0] step:3521/10000 train_time:162766ms step_avg:46.23ms +[2025-09-05 23:40:22] [Rank 0] step:3541/10000 train_time:163502ms step_avg:46.17ms +[2025-09-05 23:40:22] [Rank 0] step:3541/10000 train_time:163502ms step_avg:46.17ms +[2025-09-05 23:40:23] [Rank 0] step:3561/10000 train_time:164237ms step_avg:46.12ms +[2025-09-05 23:40:23] [Rank 0] step:3561/10000 train_time:164237ms step_avg:46.12ms +[2025-09-05 23:40:23] [Rank 0] step:3581/10000 train_time:164974ms step_avg:46.07ms +[2025-09-05 23:40:23] [Rank 0] step:3581/10000 train_time:164974ms step_avg:46.07ms +[2025-09-05 23:40:24] [Rank 0] step:3601/10000 train_time:165709ms step_avg:46.02ms +[2025-09-05 23:40:24] [Rank 0] 
step:3601/10000 train_time:165709ms step_avg:46.02ms +[2025-09-05 23:40:25] [Rank 0] step:3621/10000 train_time:166447ms step_avg:45.97ms +[2025-09-05 23:40:25] [Rank 0] step:3621/10000 train_time:166447ms step_avg:45.97ms +[2025-09-05 23:40:26] [Rank 0] step:3641/10000 train_time:167799ms step_avg:46.09ms +[2025-09-05 23:40:26] [Rank 0] step:3641/10000 train_time:167799ms step_avg:46.09ms +[2025-09-05 23:40:27] [Rank 0] step:3661/10000 train_time:168534ms step_avg:46.03ms +[2025-09-05 23:40:27] [Rank 0] step:3661/10000 train_time:168534ms step_avg:46.03ms +[2025-09-05 23:40:28] [Rank 0] step:3681/10000 train_time:169270ms step_avg:45.98ms +[2025-09-05 23:40:28] [Rank 0] step:3681/10000 train_time:169270ms step_avg:45.98ms +[2025-09-05 23:40:28] [Rank 0] step:3701/10000 train_time:170006ms step_avg:45.94ms +[2025-09-05 23:40:28] [Rank 0] step:3701/10000 train_time:170006ms step_avg:45.94ms +[2025-09-05 23:40:29] [Rank 0] step:3721/10000 train_time:170741ms step_avg:45.89ms +[2025-09-05 23:40:29] [Rank 0] step:3721/10000 train_time:170741ms step_avg:45.89ms +[2025-09-05 23:40:30] [Rank 0] step:3741/10000 train_time:171478ms step_avg:45.84ms +[2025-09-05 23:40:30] [Rank 0] step:3741/10000 train_time:171478ms step_avg:45.84ms +[2025-09-05 23:40:31] [Rank 0] step:3761/10000 train_time:172214ms step_avg:45.79ms +[2025-09-05 23:40:31] [Rank 0] step:3761/10000 train_time:172214ms step_avg:45.79ms +[2025-09-05 23:40:31] [Rank 0] step:3781/10000 train_time:172950ms step_avg:45.74ms +[2025-09-05 23:40:31] [Rank 0] step:3781/10000 train_time:172950ms step_avg:45.74ms +[2025-09-05 23:40:32] [Rank 0] step:3801/10000 train_time:173685ms step_avg:45.69ms +[2025-09-05 23:40:32] [Rank 0] step:3801/10000 train_time:173685ms step_avg:45.69ms +[2025-09-05 23:40:33] [Rank 0] step:3821/10000 train_time:174421ms step_avg:45.65ms +[2025-09-05 23:40:33] [Rank 0] step:3821/10000 train_time:174421ms step_avg:45.65ms +[2025-09-05 23:40:34] [Rank 0] step:3841/10000 train_time:175157ms step_avg:45.60ms +[2025-09-05 23:40:34] [Rank 0] step:3841/10000 train_time:175157ms step_avg:45.60ms +[2025-09-05 23:40:34] [Rank 0] step:3861/10000 train_time:175893ms step_avg:45.56ms +[2025-09-05 23:40:34] [Rank 0] step:3861/10000 train_time:175893ms step_avg:45.56ms +[2025-09-05 23:40:35] [Rank 0] step:3881/10000 train_time:176629ms step_avg:45.51ms +[2025-09-05 23:40:35] [Rank 0] step:3881/10000 train_time:176629ms step_avg:45.51ms +[2025-09-05 23:40:36] [Rank 0] step:3901/10000 train_time:177365ms step_avg:45.47ms +[2025-09-05 23:40:36] [Rank 0] step:3901/10000 train_time:177365ms step_avg:45.47ms +[2025-09-05 23:40:36] [Rank 0] step:3921/10000 train_time:178102ms step_avg:45.42ms +[2025-09-05 23:40:36] [Rank 0] step:3921/10000 train_time:178102ms step_avg:45.42ms +[2025-09-05 23:40:37] [Rank 0] step:3941/10000 train_time:178838ms step_avg:45.38ms +[2025-09-05 23:40:37] [Rank 0] step:3941/10000 train_time:178838ms step_avg:45.38ms +[2025-09-05 23:40:38] [Rank 0] step:3961/10000 train_time:179573ms step_avg:45.34ms +[2025-09-05 23:40:38] [Rank 0] step:3961/10000 train_time:179573ms step_avg:45.34ms +[2025-09-05 23:40:39] [Rank 0] step:3981/10000 train_time:180308ms step_avg:45.29ms +[2025-09-05 23:40:39] [Rank 0] step:3981/10000 train_time:180308ms step_avg:45.29ms +[2025-09-05 23:40:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:40:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). 
Some tokens might be missed. +[2025-09-05 23:40:40] [Rank 0] PRINT: step:4000/10000 train_loss:2.4959 val_loss:2.4517 train_time:181124ms step_avg:45.28ms +[2025-09-05 23:40:40] [Rank 0] PRINT: step:4000/10000 train_loss:2.4959 val_loss:2.4517 train_time:181124ms step_avg:45.28ms +[2025-09-05 23:40:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:40:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:40:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:40:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:42:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:42:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:42:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:42:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:42:01] [Rank 0] Total Loss: 4.8354 +[2025-09-05 23:42:01] [Rank 0] Total Loss: 4.8354 +[2025-09-05 23:42:01] [Rank 0] Total FTA (Unweighted): 0.2494 +[2025-09-05 23:42:01] [Rank 0] Total FTA (Unweighted): 0.2494 +[2025-09-05 23:42:01] [Rank 0] Total FTA (Weighted): 0.2494 +[2025-09-05 23:42:01] [Rank 0] Total FTA (Weighted): 0.2494 +[2025-09-05 23:42:01] [Rank 0] Group 0 Loss: 3.2989 +[2025-09-05 23:42:01] [Rank 0] Group 0 Loss: 3.2989 +[2025-09-05 23:42:01] [Rank 0] Group 1 Loss: 3.1615 +[2025-09-05 23:42:01] [Rank 0] Group 1 Loss: 3.1615 +[2025-09-05 23:42:01] [Rank 0] Group 2 Loss: 3.3342 +[2025-09-05 23:42:01] [Rank 0] Group 2 Loss: 3.3342 +[2025-09-05 23:42:01] [Rank 0] Group 3 Loss: 3.7265 +[2025-09-05 23:42:01] [Rank 0] Group 3 Loss: 3.7265 +[2025-09-05 23:42:01] [Rank 0] Group 4 Loss: 4.2661 +[2025-09-05 23:42:01] [Rank 0] Group 4 Loss: 4.2661 +[2025-09-05 23:42:01] [Rank 0] Group 5 Loss: 4.7383 +[2025-09-05 23:42:01] [Rank 0] Group 5 Loss: 4.7383 +[2025-09-05 23:42:01] [Rank 0] Group 6 Loss: 5.0412 +[2025-09-05 23:42:01] [Rank 0] Group 6 Loss: 5.0412 +[2025-09-05 23:42:01] [Rank 0] Group 7 Loss: 5.1537 +[2025-09-05 23:42:01] [Rank 0] Group 7 Loss: 5.1537 +[2025-09-05 23:42:01] [Rank 0] Group 8 Loss: 5.4441 +[2025-09-05 23:42:01] [Rank 0] Group 8 Loss: 5.4441 +[2025-09-05 23:42:01] [Rank 0] Group 9 Loss: 5.5590 +[2025-09-05 23:42:01] [Rank 0] Group 9 Loss: 5.5590 +[2025-09-05 23:42:01] [Rank 0] Group 10 Loss: 5.5935 +[2025-09-05 23:42:01] [Rank 0] Group 10 Loss: 5.5935 +[2025-09-05 23:42:01] [Rank 0] Group 11 Loss: 5.6471 +[2025-09-05 23:42:01] [Rank 0] Group 11 Loss: 5.6471 +[2025-09-05 23:42:01] [Rank 0] Group 12 Loss: 5.5741 +[2025-09-05 23:42:01] [Rank 0] Group 12 Loss: 5.5741 +[2025-09-05 23:42:01] [Rank 0] Group 13 Loss: 5.5919 +[2025-09-05 23:42:01] [Rank 0] Group 13 Loss: 5.5919 +[2025-09-05 23:42:01] [Rank 0] Group 14 Loss: 5.6477 +[2025-09-05 23:42:01] [Rank 0] Group 14 Loss: 5.6477 +[2025-09-05 23:42:01] [Rank 0] Group 15 Loss: 5.5887 +[2025-09-05 23:42:01] [Rank 0] Group 15 Loss: 5.5887 +[2025-09-05 23:42:01] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:42:01] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:42:01] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:42:01] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:42:01] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 23:42:01] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 23:42:01] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:42:01] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:42:01] [Rank 0] Group 4 FTA: 0.1500 +[2025-09-05 23:42:01] [Rank 0] Group 4 FTA: 0.1500 +[2025-09-05 23:42:01] [Rank 0] Group 5 FTA: 0.2000 +[2025-09-05 
23:42:01] [Rank 0] Group 5 FTA: 0.2000 +[2025-09-05 23:42:01] [Rank 0] Group 6 FTA: 0.1300 +[2025-09-05 23:42:01] [Rank 0] Group 6 FTA: 0.1300 +[2025-09-05 23:42:01] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 23:42:01] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 23:42:01] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:42:01] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:42:01] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-05 23:42:01] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-05 23:42:01] [Rank 0] Group 10 FTA: 0.1500 +[2025-09-05 23:42:01] [Rank 0] Group 10 FTA: 0.1500 +[2025-09-05 23:42:01] [Rank 0] Group 11 FTA: 0.1300 +[2025-09-05 23:42:01] [Rank 0] Group 11 FTA: 0.1300 +[2025-09-05 23:42:01] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 23:42:01] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 23:42:01] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 23:42:01] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 23:42:01] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:42:01] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:42:01] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 23:42:01] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 23:42:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:42:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:42:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:42:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:42:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:42:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:42:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:42:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:42:03] [Rank 0] step:4001/10000 train_time:181134ms step_avg:45.27ms +[2025-09-05 23:42:03] [Rank 0] step:4001/10000 train_time:181134ms step_avg:45.27ms +[2025-09-05 23:42:04] [Rank 0] step:4021/10000 train_time:182419ms step_avg:45.37ms +[2025-09-05 23:42:04] [Rank 0] step:4021/10000 train_time:182419ms step_avg:45.37ms +[2025-09-05 23:42:05] [Rank 0] step:4041/10000 train_time:183155ms step_avg:45.32ms +[2025-09-05 23:42:05] [Rank 0] step:4041/10000 train_time:183155ms step_avg:45.32ms +[2025-09-05 23:42:05] [Rank 0] step:4061/10000 train_time:183891ms step_avg:45.28ms +[2025-09-05 23:42:05] [Rank 0] step:4061/10000 train_time:183891ms step_avg:45.28ms +[2025-09-05 23:42:06] [Rank 0] step:4081/10000 train_time:184627ms step_avg:45.24ms +[2025-09-05 23:42:06] [Rank 0] step:4081/10000 train_time:184627ms step_avg:45.24ms +[2025-09-05 23:42:07] [Rank 0] step:4101/10000 train_time:185363ms step_avg:45.20ms +[2025-09-05 23:42:07] [Rank 0] step:4101/10000 train_time:185363ms step_avg:45.20ms +[2025-09-05 23:42:08] [Rank 0] step:4121/10000 train_time:186099ms 
step_avg:45.16ms +[2025-09-05 23:42:08] [Rank 0] step:4121/10000 train_time:186099ms step_avg:45.16ms +[2025-09-05 23:42:08] [Rank 0] step:4141/10000 train_time:186835ms step_avg:45.12ms +[2025-09-05 23:42:08] [Rank 0] step:4141/10000 train_time:186835ms step_avg:45.12ms +[2025-09-05 23:42:09] [Rank 0] step:4161/10000 train_time:187571ms step_avg:45.08ms +[2025-09-05 23:42:09] [Rank 0] step:4161/10000 train_time:187571ms step_avg:45.08ms +[2025-09-05 23:42:10] [Rank 0] step:4181/10000 train_time:188314ms step_avg:45.04ms +[2025-09-05 23:42:10] [Rank 0] step:4181/10000 train_time:188314ms step_avg:45.04ms +[2025-09-05 23:42:11] [Rank 0] step:4201/10000 train_time:189050ms step_avg:45.00ms +[2025-09-05 23:42:11] [Rank 0] step:4201/10000 train_time:189050ms step_avg:45.00ms +[2025-09-05 23:42:11] [Rank 0] step:4221/10000 train_time:189786ms step_avg:44.96ms +[2025-09-05 23:42:11] [Rank 0] step:4221/10000 train_time:189786ms step_avg:44.96ms +[2025-09-05 23:42:12] [Rank 0] step:4241/10000 train_time:190521ms step_avg:44.92ms +[2025-09-05 23:42:12] [Rank 0] step:4241/10000 train_time:190521ms step_avg:44.92ms +[2025-09-05 23:42:13] [Rank 0] step:4261/10000 train_time:191258ms step_avg:44.89ms +[2025-09-05 23:42:13] [Rank 0] step:4261/10000 train_time:191258ms step_avg:44.89ms +[2025-09-05 23:42:13] [Rank 0] step:4281/10000 train_time:191994ms step_avg:44.85ms +[2025-09-05 23:42:13] [Rank 0] step:4281/10000 train_time:191994ms step_avg:44.85ms +[2025-09-05 23:42:14] [Rank 0] step:4301/10000 train_time:192730ms step_avg:44.81ms +[2025-09-05 23:42:14] [Rank 0] step:4301/10000 train_time:192730ms step_avg:44.81ms +[2025-09-05 23:42:15] [Rank 0] step:4321/10000 train_time:193466ms step_avg:44.77ms +[2025-09-05 23:42:15] [Rank 0] step:4321/10000 train_time:193466ms step_avg:44.77ms +[2025-09-05 23:42:16] [Rank 0] step:4341/10000 train_time:194203ms step_avg:44.74ms +[2025-09-05 23:42:16] [Rank 0] step:4341/10000 train_time:194203ms step_avg:44.74ms +[2025-09-05 23:42:16] [Rank 0] step:4361/10000 train_time:194939ms step_avg:44.70ms +[2025-09-05 23:42:16] [Rank 0] step:4361/10000 train_time:194939ms step_avg:44.70ms +[2025-09-05 23:42:17] [Rank 0] step:4381/10000 train_time:195675ms step_avg:44.66ms +[2025-09-05 23:42:17] [Rank 0] step:4381/10000 train_time:195675ms step_avg:44.66ms +[2025-09-05 23:42:18] [Rank 0] step:4401/10000 train_time:196411ms step_avg:44.63ms +[2025-09-05 23:42:18] [Rank 0] step:4401/10000 train_time:196411ms step_avg:44.63ms +[2025-09-05 23:42:19] [Rank 0] step:4421/10000 train_time:197147ms step_avg:44.59ms +[2025-09-05 23:42:19] [Rank 0] step:4421/10000 train_time:197147ms step_avg:44.59ms +[2025-09-05 23:42:19] [Rank 0] step:4441/10000 train_time:197884ms step_avg:44.56ms +[2025-09-05 23:42:19] [Rank 0] step:4441/10000 train_time:197884ms step_avg:44.56ms +[2025-09-05 23:42:20] [Rank 0] step:4461/10000 train_time:198619ms step_avg:44.52ms +[2025-09-05 23:42:20] [Rank 0] step:4461/10000 train_time:198619ms step_avg:44.52ms +[2025-09-05 23:42:21] [Rank 0] step:4481/10000 train_time:199355ms step_avg:44.49ms +[2025-09-05 23:42:21] [Rank 0] step:4481/10000 train_time:199355ms step_avg:44.49ms +[2025-09-05 23:42:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:42:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:42:22] [Rank 0] PRINT: step:4500/10000 train_loss:2.4298 val_loss:2.3901 train_time:200172ms step_avg:44.48ms +[2025-09-05 23:42:22] [Rank 0] PRINT: step:4500/10000 train_loss:2.4298 val_loss:2.3901 train_time:200172ms step_avg:44.48ms +[2025-09-05 23:42:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:42:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:42:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:42:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:43:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:43:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:43:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:43:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:43:43] [Rank 0] Total Loss: 4.8144 +[2025-09-05 23:43:43] [Rank 0] Total Loss: 4.8144 +[2025-09-05 23:43:43] [Rank 0] Total FTA (Unweighted): 0.2756 +[2025-09-05 23:43:43] [Rank 0] Total FTA (Unweighted): 0.2756 +[2025-09-05 23:43:43] [Rank 0] Total FTA (Weighted): 0.2756 +[2025-09-05 23:43:43] [Rank 0] Total FTA (Weighted): 0.2756 +[2025-09-05 23:43:43] [Rank 0] Group 0 Loss: 3.3150 +[2025-09-05 23:43:43] [Rank 0] Group 0 Loss: 3.3150 +[2025-09-05 23:43:43] [Rank 0] Group 1 Loss: 3.1959 +[2025-09-05 23:43:43] [Rank 0] Group 1 Loss: 3.1959 +[2025-09-05 23:43:43] [Rank 0] Group 2 Loss: 3.3144 +[2025-09-05 23:43:43] [Rank 0] Group 2 Loss: 3.3144 +[2025-09-05 23:43:43] [Rank 0] Group 3 Loss: 3.7429 +[2025-09-05 23:43:43] [Rank 0] Group 3 Loss: 3.7429 +[2025-09-05 23:43:43] [Rank 0] Group 4 Loss: 4.2275 +[2025-09-05 23:43:43] [Rank 0] Group 4 Loss: 4.2275 +[2025-09-05 23:43:43] [Rank 0] Group 5 Loss: 4.6760 +[2025-09-05 23:43:43] [Rank 0] Group 5 Loss: 4.6760 +[2025-09-05 23:43:43] [Rank 0] Group 6 Loss: 4.9900 +[2025-09-05 23:43:43] [Rank 0] Group 6 Loss: 4.9900 +[2025-09-05 23:43:43] [Rank 0] Group 7 Loss: 5.1163 +[2025-09-05 23:43:43] [Rank 0] Group 7 Loss: 5.1163 +[2025-09-05 23:43:43] [Rank 0] Group 8 Loss: 5.4229 +[2025-09-05 23:43:43] [Rank 0] Group 8 Loss: 5.4229 +[2025-09-05 23:43:43] [Rank 0] Group 9 Loss: 5.5530 +[2025-09-05 23:43:43] [Rank 0] Group 9 Loss: 5.5530 +[2025-09-05 23:43:43] [Rank 0] Group 10 Loss: 5.5517 +[2025-09-05 23:43:43] [Rank 0] Group 10 Loss: 5.5517 +[2025-09-05 23:43:43] [Rank 0] Group 11 Loss: 5.6280 +[2025-09-05 23:43:43] [Rank 0] Group 11 Loss: 5.6280 +[2025-09-05 23:43:43] [Rank 0] Group 12 Loss: 5.5469 +[2025-09-05 23:43:43] [Rank 0] Group 12 Loss: 5.5469 +[2025-09-05 23:43:43] [Rank 0] Group 13 Loss: 5.5650 +[2025-09-05 23:43:43] [Rank 0] Group 13 Loss: 5.5650 +[2025-09-05 23:43:43] [Rank 0] Group 14 Loss: 5.6294 +[2025-09-05 23:43:43] [Rank 0] Group 14 Loss: 5.6294 +[2025-09-05 23:43:43] [Rank 0] Group 15 Loss: 5.5560 +[2025-09-05 23:43:43] [Rank 0] Group 15 Loss: 5.5560 +[2025-09-05 23:43:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:43:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:43:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:43:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:43:43] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:43:43] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:43:43] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:43:43] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:43:43] [Rank 0] Group 4 FTA: 0.2400 +[2025-09-05 23:43:43] [Rank 0] Group 4 FTA: 0.2400 +[2025-09-05 23:43:43] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:43:43] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:43:43] [Rank 0] Group 6 FTA: 0.2300 +[2025-09-05 23:43:43] [Rank 0] Group 6 FTA: 0.2300 +[2025-09-05 23:43:43] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:43:43] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:43:43] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:43:43] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:43:43] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 23:43:43] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 23:43:43] [Rank 0] Group 10 FTA: 0.1400 +[2025-09-05 23:43:43] [Rank 0] Group 10 FTA: 0.1400 +[2025-09-05 23:43:43] [Rank 0] Group 11 FTA: 0.1300 +[2025-09-05 23:43:43] [Rank 0] Group 11 FTA: 0.1300 +[2025-09-05 23:43:43] [Rank 0] Group 12 FTA: 0.1200 +[2025-09-05 23:43:43] [Rank 0] Group 12 FTA: 0.1200 +[2025-09-05 23:43:43] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 23:43:43] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 23:43:43] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 23:43:43] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 23:43:43] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 23:43:43] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 23:43:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:43:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:43:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:43:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:43:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:43:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:43:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:43:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:43:44] [Rank 0] step:4501/10000 train_time:200183ms step_avg:44.48ms +[2025-09-05 23:43:44] [Rank 0] step:4501/10000 train_time:200183ms step_avg:44.48ms +[2025-09-05 23:43:45] [Rank 0] step:4521/10000 train_time:200860ms step_avg:44.43ms +[2025-09-05 23:43:45] [Rank 0] step:4521/10000 train_time:200860ms step_avg:44.43ms +[2025-09-05 23:43:46] [Rank 0] step:4541/10000 train_time:201596ms step_avg:44.39ms +[2025-09-05 23:43:46] [Rank 0] step:4541/10000 train_time:201596ms step_avg:44.39ms +[2025-09-05 23:43:47] [Rank 0] step:4561/10000 train_time:202331ms step_avg:44.36ms +[2025-09-05 23:43:47] [Rank 0] step:4561/10000 train_time:202331ms step_avg:44.36ms +[2025-09-05 23:43:47] [Rank 0] step:4581/10000 train_time:203067ms step_avg:44.33ms +[2025-09-05 23:43:47] [Rank 0] step:4581/10000 train_time:203067ms step_avg:44.33ms +[2025-09-05 23:43:48] [Rank 0] step:4601/10000 train_time:203803ms step_avg:44.30ms +[2025-09-05 23:43:48] [Rank 0] step:4601/10000 train_time:203803ms step_avg:44.30ms +[2025-09-05 23:43:49] [Rank 0] step:4621/10000 train_time:204539ms step_avg:44.26ms +[2025-09-05 
23:43:49] [Rank 0] step:4621/10000 train_time:204539ms step_avg:44.26ms +[2025-09-05 23:43:49] [Rank 0] step:4641/10000 train_time:205278ms step_avg:44.23ms +[2025-09-05 23:43:49] [Rank 0] step:4641/10000 train_time:205278ms step_avg:44.23ms +[2025-09-05 23:43:50] [Rank 0] step:4661/10000 train_time:206014ms step_avg:44.20ms +[2025-09-05 23:43:50] [Rank 0] step:4661/10000 train_time:206014ms step_avg:44.20ms +[2025-09-05 23:43:51] [Rank 0] step:4681/10000 train_time:206750ms step_avg:44.17ms +[2025-09-05 23:43:51] [Rank 0] step:4681/10000 train_time:206750ms step_avg:44.17ms +[2025-09-05 23:43:52] [Rank 0] step:4701/10000 train_time:207486ms step_avg:44.14ms +[2025-09-05 23:43:52] [Rank 0] step:4701/10000 train_time:207486ms step_avg:44.14ms +[2025-09-05 23:43:52] [Rank 0] step:4721/10000 train_time:208222ms step_avg:44.11ms +[2025-09-05 23:43:52] [Rank 0] step:4721/10000 train_time:208222ms step_avg:44.11ms +[2025-09-05 23:43:53] [Rank 0] step:4741/10000 train_time:208959ms step_avg:44.07ms +[2025-09-05 23:43:53] [Rank 0] step:4741/10000 train_time:208959ms step_avg:44.07ms +[2025-09-05 23:43:54] [Rank 0] step:4761/10000 train_time:209695ms step_avg:44.04ms +[2025-09-05 23:43:54] [Rank 0] step:4761/10000 train_time:209695ms step_avg:44.04ms +[2025-09-05 23:43:55] [Rank 0] step:4781/10000 train_time:210431ms step_avg:44.01ms +[2025-09-05 23:43:55] [Rank 0] step:4781/10000 train_time:210431ms step_avg:44.01ms +[2025-09-05 23:43:55] [Rank 0] step:4801/10000 train_time:211167ms step_avg:43.98ms +[2025-09-05 23:43:55] [Rank 0] step:4801/10000 train_time:211167ms step_avg:43.98ms +[2025-09-05 23:43:56] [Rank 0] step:4821/10000 train_time:211904ms step_avg:43.95ms +[2025-09-05 23:43:56] [Rank 0] step:4821/10000 train_time:211904ms step_avg:43.95ms +[2025-09-05 23:43:57] [Rank 0] step:4841/10000 train_time:212948ms step_avg:43.99ms +[2025-09-05 23:43:57] [Rank 0] step:4841/10000 train_time:212948ms step_avg:43.99ms +[2025-09-05 23:43:58] [Rank 0] step:4861/10000 train_time:213684ms step_avg:43.96ms +[2025-09-05 23:43:58] [Rank 0] step:4861/10000 train_time:213684ms step_avg:43.96ms +[2025-09-05 23:43:59] [Rank 0] step:4881/10000 train_time:214420ms step_avg:43.93ms +[2025-09-05 23:43:59] [Rank 0] step:4881/10000 train_time:214420ms step_avg:43.93ms +[2025-09-05 23:43:59] [Rank 0] step:4901/10000 train_time:215156ms step_avg:43.90ms +[2025-09-05 23:43:59] [Rank 0] step:4901/10000 train_time:215156ms step_avg:43.90ms +[2025-09-05 23:44:00] [Rank 0] step:4921/10000 train_time:215893ms step_avg:43.87ms +[2025-09-05 23:44:00] [Rank 0] step:4921/10000 train_time:215893ms step_avg:43.87ms +[2025-09-05 23:44:01] [Rank 0] step:4941/10000 train_time:216629ms step_avg:43.84ms +[2025-09-05 23:44:01] [Rank 0] step:4941/10000 train_time:216629ms step_avg:43.84ms +[2025-09-05 23:44:02] [Rank 0] step:4961/10000 train_time:217365ms step_avg:43.81ms +[2025-09-05 23:44:02] [Rank 0] step:4961/10000 train_time:217365ms step_avg:43.81ms +[2025-09-05 23:44:02] [Rank 0] step:4981/10000 train_time:218101ms step_avg:43.79ms +[2025-09-05 23:44:02] [Rank 0] step:4981/10000 train_time:218101ms step_avg:43.79ms +[2025-09-05 23:44:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:44:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:44:03] [Rank 0] PRINT: step:5000/10000 train_loss:2.3762 val_loss:2.3477 train_time:218918ms step_avg:43.78ms +[2025-09-05 23:44:03] [Rank 0] PRINT: step:5000/10000 train_loss:2.3762 val_loss:2.3477 train_time:218918ms step_avg:43.78ms +[2025-09-05 23:44:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:44:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:44:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:44:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:45:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:45:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:45:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:45:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:45:25] [Rank 0] Total Loss: 4.8215 +[2025-09-05 23:45:25] [Rank 0] Total Loss: 4.8215 +[2025-09-05 23:45:25] [Rank 0] Total FTA (Unweighted): 0.2731 +[2025-09-05 23:45:25] [Rank 0] Total FTA (Unweighted): 0.2731 +[2025-09-05 23:45:25] [Rank 0] Total FTA (Weighted): 0.2731 +[2025-09-05 23:45:25] [Rank 0] Total FTA (Weighted): 0.2731 +[2025-09-05 23:45:25] [Rank 0] Group 0 Loss: 3.3201 +[2025-09-05 23:45:25] [Rank 0] Group 0 Loss: 3.3201 +[2025-09-05 23:45:25] [Rank 0] Group 1 Loss: 3.2951 +[2025-09-05 23:45:25] [Rank 0] Group 1 Loss: 3.2951 +[2025-09-05 23:45:25] [Rank 0] Group 2 Loss: 3.3931 +[2025-09-05 23:45:25] [Rank 0] Group 2 Loss: 3.3931 +[2025-09-05 23:45:25] [Rank 0] Group 3 Loss: 3.7715 +[2025-09-05 23:45:25] [Rank 0] Group 3 Loss: 3.7715 +[2025-09-05 23:45:25] [Rank 0] Group 4 Loss: 4.2077 +[2025-09-05 23:45:25] [Rank 0] Group 4 Loss: 4.2077 +[2025-09-05 23:45:25] [Rank 0] Group 5 Loss: 4.6552 +[2025-09-05 23:45:25] [Rank 0] Group 5 Loss: 4.6552 +[2025-09-05 23:45:25] [Rank 0] Group 6 Loss: 4.9866 +[2025-09-05 23:45:25] [Rank 0] Group 6 Loss: 4.9866 +[2025-09-05 23:45:25] [Rank 0] Group 7 Loss: 5.1196 +[2025-09-05 23:45:25] [Rank 0] Group 7 Loss: 5.1196 +[2025-09-05 23:45:25] [Rank 0] Group 8 Loss: 5.4118 +[2025-09-05 23:45:25] [Rank 0] Group 8 Loss: 5.4118 +[2025-09-05 23:45:25] [Rank 0] Group 9 Loss: 5.5264 +[2025-09-05 23:45:25] [Rank 0] Group 9 Loss: 5.5264 +[2025-09-05 23:45:25] [Rank 0] Group 10 Loss: 5.5628 +[2025-09-05 23:45:25] [Rank 0] Group 10 Loss: 5.5628 +[2025-09-05 23:45:25] [Rank 0] Group 11 Loss: 5.6307 +[2025-09-05 23:45:25] [Rank 0] Group 11 Loss: 5.6307 +[2025-09-05 23:45:25] [Rank 0] Group 12 Loss: 5.5378 +[2025-09-05 23:45:25] [Rank 0] Group 12 Loss: 5.5378 +[2025-09-05 23:45:25] [Rank 0] Group 13 Loss: 5.5634 +[2025-09-05 23:45:25] [Rank 0] Group 13 Loss: 5.5634 +[2025-09-05 23:45:25] [Rank 0] Group 14 Loss: 5.6129 +[2025-09-05 23:45:25] [Rank 0] Group 14 Loss: 5.6129 +[2025-09-05 23:45:25] [Rank 0] Group 15 Loss: 5.5486 +[2025-09-05 23:45:25] [Rank 0] Group 15 Loss: 5.5486 +[2025-09-05 23:45:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:45:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:45:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:45:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:45:25] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:45:25] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:45:25] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:45:25] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:45:25] [Rank 0] Group 4 FTA: 0.2200 +[2025-09-05 23:45:25] [Rank 0] Group 4 FTA: 0.2200 +[2025-09-05 23:45:25] [Rank 0] Group 5 FTA: 0.2300 +[2025-09-05 23:45:25] [Rank 0] Group 5 FTA: 
0.2300 +[2025-09-05 23:45:25] [Rank 0] Group 6 FTA: 0.2300 +[2025-09-05 23:45:25] [Rank 0] Group 6 FTA: 0.2300 +[2025-09-05 23:45:25] [Rank 0] Group 7 FTA: 0.1200 +[2025-09-05 23:45:25] [Rank 0] Group 7 FTA: 0.1200 +[2025-09-05 23:45:25] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:45:25] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:45:25] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 23:45:25] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 23:45:25] [Rank 0] Group 10 FTA: 0.1400 +[2025-09-05 23:45:25] [Rank 0] Group 10 FTA: 0.1400 +[2025-09-05 23:45:25] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 23:45:25] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 23:45:25] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 23:45:25] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 23:45:25] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 23:45:25] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 23:45:25] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 23:45:25] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 23:45:25] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 23:45:25] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 23:45:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:45:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:45:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:45:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:45:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:45:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:45:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:45:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:45:26] [Rank 0] step:5001/10000 train_time:218928ms step_avg:43.78ms +[2025-09-05 23:45:26] [Rank 0] step:5001/10000 train_time:218928ms step_avg:43.78ms +[2025-09-05 23:45:27] [Rank 0] step:5021/10000 train_time:219596ms step_avg:43.74ms +[2025-09-05 23:45:27] [Rank 0] step:5021/10000 train_time:219596ms step_avg:43.74ms +[2025-09-05 23:45:28] [Rank 0] step:5041/10000 train_time:220333ms step_avg:43.71ms +[2025-09-05 23:45:28] [Rank 0] step:5041/10000 train_time:220333ms step_avg:43.71ms +[2025-09-05 23:45:29] [Rank 0] step:5061/10000 train_time:221069ms step_avg:43.68ms +[2025-09-05 23:45:29] [Rank 0] step:5061/10000 train_time:221069ms step_avg:43.68ms +[2025-09-05 23:45:29] [Rank 0] step:5081/10000 train_time:221804ms step_avg:43.65ms +[2025-09-05 23:45:29] [Rank 0] step:5081/10000 train_time:221804ms step_avg:43.65ms +[2025-09-05 23:45:30] [Rank 0] step:5101/10000 train_time:222540ms step_avg:43.63ms +[2025-09-05 23:45:30] [Rank 0] step:5101/10000 train_time:222540ms step_avg:43.63ms +[2025-09-05 23:45:31] [Rank 0] step:5121/10000 train_time:223275ms step_avg:43.60ms +[2025-09-05 
23:45:31] [Rank 0] step:5121/10000 train_time:223275ms step_avg:43.60ms +[2025-09-05 23:45:32] [Rank 0] step:5141/10000 train_time:224011ms step_avg:43.57ms +[2025-09-05 23:45:32] [Rank 0] step:5141/10000 train_time:224011ms step_avg:43.57ms +[2025-09-05 23:45:32] [Rank 0] step:5161/10000 train_time:224747ms step_avg:43.55ms +[2025-09-05 23:45:32] [Rank 0] step:5161/10000 train_time:224747ms step_avg:43.55ms +[2025-09-05 23:45:33] [Rank 0] step:5181/10000 train_time:225483ms step_avg:43.52ms +[2025-09-05 23:45:33] [Rank 0] step:5181/10000 train_time:225483ms step_avg:43.52ms +[2025-09-05 23:45:34] [Rank 0] step:5201/10000 train_time:226219ms step_avg:43.50ms +[2025-09-05 23:45:34] [Rank 0] step:5201/10000 train_time:226219ms step_avg:43.50ms +[2025-09-05 23:45:35] [Rank 0] step:5221/10000 train_time:226956ms step_avg:43.47ms +[2025-09-05 23:45:35] [Rank 0] step:5221/10000 train_time:226956ms step_avg:43.47ms +[2025-09-05 23:45:35] [Rank 0] step:5241/10000 train_time:227692ms step_avg:43.44ms +[2025-09-05 23:45:35] [Rank 0] step:5241/10000 train_time:227692ms step_avg:43.44ms +[2025-09-05 23:45:36] [Rank 0] step:5261/10000 train_time:228428ms step_avg:43.42ms +[2025-09-05 23:45:36] [Rank 0] step:5261/10000 train_time:228428ms step_avg:43.42ms +[2025-09-05 23:45:37] [Rank 0] step:5281/10000 train_time:229164ms step_avg:43.39ms +[2025-09-05 23:45:37] [Rank 0] step:5281/10000 train_time:229164ms step_avg:43.39ms +[2025-09-05 23:45:38] [Rank 0] step:5301/10000 train_time:229900ms step_avg:43.37ms +[2025-09-05 23:45:38] [Rank 0] step:5301/10000 train_time:229900ms step_avg:43.37ms +[2025-09-05 23:45:38] [Rank 0] step:5321/10000 train_time:230635ms step_avg:43.34ms +[2025-09-05 23:45:38] [Rank 0] step:5321/10000 train_time:230635ms step_avg:43.34ms +[2025-09-05 23:45:39] [Rank 0] step:5341/10000 train_time:231371ms step_avg:43.32ms +[2025-09-05 23:45:39] [Rank 0] step:5341/10000 train_time:231371ms step_avg:43.32ms +[2025-09-05 23:45:40] [Rank 0] step:5361/10000 train_time:232108ms step_avg:43.30ms +[2025-09-05 23:45:40] [Rank 0] step:5361/10000 train_time:232108ms step_avg:43.30ms +[2025-09-05 23:45:40] [Rank 0] step:5381/10000 train_time:232844ms step_avg:43.27ms +[2025-09-05 23:45:40] [Rank 0] step:5381/10000 train_time:232844ms step_avg:43.27ms +[2025-09-05 23:45:41] [Rank 0] step:5401/10000 train_time:233580ms step_avg:43.25ms +[2025-09-05 23:45:41] [Rank 0] step:5401/10000 train_time:233580ms step_avg:43.25ms +[2025-09-05 23:45:42] [Rank 0] step:5421/10000 train_time:234316ms step_avg:43.22ms +[2025-09-05 23:45:42] [Rank 0] step:5421/10000 train_time:234316ms step_avg:43.22ms +[2025-09-05 23:45:43] [Rank 0] step:5441/10000 train_time:235054ms step_avg:43.20ms +[2025-09-05 23:45:43] [Rank 0] step:5441/10000 train_time:235054ms step_avg:43.20ms +[2025-09-05 23:45:43] [Rank 0] step:5461/10000 train_time:235789ms step_avg:43.18ms +[2025-09-05 23:45:43] [Rank 0] step:5461/10000 train_time:235789ms step_avg:43.18ms +[2025-09-05 23:45:44] [Rank 0] step:5481/10000 train_time:236525ms step_avg:43.15ms +[2025-09-05 23:45:44] [Rank 0] step:5481/10000 train_time:236525ms step_avg:43.15ms +[2025-09-05 23:45:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:45:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:45:45] [Rank 0] PRINT: step:5500/10000 train_loss:2.3344 val_loss:2.3056 train_time:237341ms step_avg:43.15ms +[2025-09-05 23:45:45] [Rank 0] PRINT: step:5500/10000 train_loss:2.3344 val_loss:2.3056 train_time:237341ms step_avg:43.15ms +[2025-09-05 23:45:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:45:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:45:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:45:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:47:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:47:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:47:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:47:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:47:06] [Rank 0] Total Loss: 4.7746 +[2025-09-05 23:47:06] [Rank 0] Total Loss: 4.7746 +[2025-09-05 23:47:06] [Rank 0] Total FTA (Unweighted): 0.2819 +[2025-09-05 23:47:06] [Rank 0] Total FTA (Unweighted): 0.2819 +[2025-09-05 23:47:06] [Rank 0] Total FTA (Weighted): 0.2819 +[2025-09-05 23:47:06] [Rank 0] Total FTA (Weighted): 0.2819 +[2025-09-05 23:47:06] [Rank 0] Group 0 Loss: 3.2980 +[2025-09-05 23:47:06] [Rank 0] Group 0 Loss: 3.2980 +[2025-09-05 23:47:06] [Rank 0] Group 1 Loss: 3.2335 +[2025-09-05 23:47:06] [Rank 0] Group 1 Loss: 3.2335 +[2025-09-05 23:47:06] [Rank 0] Group 2 Loss: 3.3344 +[2025-09-05 23:47:06] [Rank 0] Group 2 Loss: 3.3344 +[2025-09-05 23:47:06] [Rank 0] Group 3 Loss: 3.7461 +[2025-09-05 23:47:06] [Rank 0] Group 3 Loss: 3.7461 +[2025-09-05 23:47:06] [Rank 0] Group 4 Loss: 4.1779 +[2025-09-05 23:47:06] [Rank 0] Group 4 Loss: 4.1779 +[2025-09-05 23:47:06] [Rank 0] Group 5 Loss: 4.6012 +[2025-09-05 23:47:06] [Rank 0] Group 5 Loss: 4.6012 +[2025-09-05 23:47:06] [Rank 0] Group 6 Loss: 4.9230 +[2025-09-05 23:47:06] [Rank 0] Group 6 Loss: 4.9230 +[2025-09-05 23:47:06] [Rank 0] Group 7 Loss: 5.0634 +[2025-09-05 23:47:06] [Rank 0] Group 7 Loss: 5.0634 +[2025-09-05 23:47:06] [Rank 0] Group 8 Loss: 5.3627 +[2025-09-05 23:47:06] [Rank 0] Group 8 Loss: 5.3627 +[2025-09-05 23:47:07] [Rank 0] Group 9 Loss: 5.4748 +[2025-09-05 23:47:07] [Rank 0] Group 9 Loss: 5.4748 +[2025-09-05 23:47:07] [Rank 0] Group 10 Loss: 5.5179 +[2025-09-05 23:47:07] [Rank 0] Group 10 Loss: 5.5179 +[2025-09-05 23:47:07] [Rank 0] Group 11 Loss: 5.5827 +[2025-09-05 23:47:07] [Rank 0] Group 11 Loss: 5.5827 +[2025-09-05 23:47:07] [Rank 0] Group 12 Loss: 5.4935 +[2025-09-05 23:47:07] [Rank 0] Group 12 Loss: 5.4935 +[2025-09-05 23:47:07] [Rank 0] Group 13 Loss: 5.5097 +[2025-09-05 23:47:07] [Rank 0] Group 13 Loss: 5.5097 +[2025-09-05 23:47:07] [Rank 0] Group 14 Loss: 5.5610 +[2025-09-05 23:47:07] [Rank 0] Group 14 Loss: 5.5610 +[2025-09-05 23:47:07] [Rank 0] Group 15 Loss: 5.5131 +[2025-09-05 23:47:07] [Rank 0] Group 15 Loss: 5.5131 +[2025-09-05 23:47:07] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:47:07] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:47:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:47:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:47:07] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:47:07] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:47:07] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:47:07] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:47:07] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:47:07] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:47:07] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:47:07] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:47:07] [Rank 0] Group 6 FTA: 0.2600 +[2025-09-05 23:47:07] [Rank 0] Group 6 FTA: 0.2600 +[2025-09-05 23:47:07] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:47:07] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:47:07] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:47:07] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:47:07] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:47:07] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:47:07] [Rank 0] Group 10 FTA: 0.1500 +[2025-09-05 23:47:07] [Rank 0] Group 10 FTA: 0.1500 +[2025-09-05 23:47:07] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 23:47:07] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 23:47:07] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 23:47:07] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 23:47:07] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 23:47:07] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 23:47:07] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 23:47:07] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 23:47:07] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 23:47:07] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 23:47:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:47:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:47:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:47:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:47:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:47:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:47:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:47:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:47:08] [Rank 0] step:5501/10000 train_time:237350ms step_avg:43.15ms +[2025-09-05 23:47:08] [Rank 0] step:5501/10000 train_time:237350ms step_avg:43.15ms +[2025-09-05 23:47:09] [Rank 0] step:5521/10000 train_time:238032ms step_avg:43.11ms +[2025-09-05 23:47:09] [Rank 0] step:5521/10000 train_time:238032ms step_avg:43.11ms +[2025-09-05 23:47:10] [Rank 0] step:5541/10000 train_time:238769ms step_avg:43.09ms +[2025-09-05 23:47:10] [Rank 0] step:5541/10000 train_time:238769ms step_avg:43.09ms +[2025-09-05 23:47:10] [Rank 0] step:5561/10000 train_time:239506ms step_avg:43.07ms +[2025-09-05 23:47:10] [Rank 0] step:5561/10000 train_time:239506ms step_avg:43.07ms +[2025-09-05 23:47:11] [Rank 0] step:5581/10000 train_time:240241ms step_avg:43.05ms +[2025-09-05 23:47:11] [Rank 0] step:5581/10000 train_time:240241ms step_avg:43.05ms +[2025-09-05 23:47:12] [Rank 0] step:5601/10000 train_time:240978ms step_avg:43.02ms +[2025-09-05 23:47:12] [Rank 0] step:5601/10000 train_time:240978ms step_avg:43.02ms +[2025-09-05 23:47:13] [Rank 0] step:5621/10000 train_time:241714ms step_avg:43.00ms +[2025-09-05 
23:47:13] [Rank 0] step:5621/10000 train_time:241714ms step_avg:43.00ms +[2025-09-05 23:47:14] [Rank 0] step:5641/10000 train_time:243066ms step_avg:43.09ms +[2025-09-05 23:47:14] [Rank 0] step:5641/10000 train_time:243066ms step_avg:43.09ms +[2025-09-05 23:47:15] [Rank 0] step:5661/10000 train_time:243802ms step_avg:43.07ms +[2025-09-05 23:47:15] [Rank 0] step:5661/10000 train_time:243802ms step_avg:43.07ms +[2025-09-05 23:47:15] [Rank 0] step:5681/10000 train_time:244538ms step_avg:43.04ms +[2025-09-05 23:47:15] [Rank 0] step:5681/10000 train_time:244538ms step_avg:43.04ms +[2025-09-05 23:47:16] [Rank 0] step:5701/10000 train_time:245386ms step_avg:43.04ms +[2025-09-05 23:47:16] [Rank 0] step:5701/10000 train_time:245386ms step_avg:43.04ms +[2025-09-05 23:47:17] [Rank 0] step:5721/10000 train_time:246121ms step_avg:43.02ms +[2025-09-05 23:47:17] [Rank 0] step:5721/10000 train_time:246121ms step_avg:43.02ms +[2025-09-05 23:47:18] [Rank 0] step:5741/10000 train_time:246858ms step_avg:43.00ms +[2025-09-05 23:47:18] [Rank 0] step:5741/10000 train_time:246858ms step_avg:43.00ms +[2025-09-05 23:47:19] [Rank 0] step:5761/10000 train_time:247708ms step_avg:43.00ms +[2025-09-05 23:47:19] [Rank 0] step:5761/10000 train_time:247708ms step_avg:43.00ms +[2025-09-05 23:47:19] [Rank 0] step:5781/10000 train_time:248445ms step_avg:42.98ms +[2025-09-05 23:47:19] [Rank 0] step:5781/10000 train_time:248445ms step_avg:42.98ms +[2025-09-05 23:47:20] [Rank 0] step:5801/10000 train_time:249182ms step_avg:42.95ms +[2025-09-05 23:47:20] [Rank 0] step:5801/10000 train_time:249182ms step_avg:42.95ms +[2025-09-05 23:47:21] [Rank 0] step:5821/10000 train_time:249918ms step_avg:42.93ms +[2025-09-05 23:47:21] [Rank 0] step:5821/10000 train_time:249918ms step_avg:42.93ms +[2025-09-05 23:47:21] [Rank 0] step:5841/10000 train_time:250654ms step_avg:42.91ms +[2025-09-05 23:47:21] [Rank 0] step:5841/10000 train_time:250654ms step_avg:42.91ms +[2025-09-05 23:47:22] [Rank 0] step:5861/10000 train_time:251391ms step_avg:42.89ms +[2025-09-05 23:47:22] [Rank 0] step:5861/10000 train_time:251391ms step_avg:42.89ms +[2025-09-05 23:47:23] [Rank 0] step:5881/10000 train_time:252129ms step_avg:42.87ms +[2025-09-05 23:47:23] [Rank 0] step:5881/10000 train_time:252129ms step_avg:42.87ms +[2025-09-05 23:47:24] [Rank 0] step:5901/10000 train_time:252864ms step_avg:42.85ms +[2025-09-05 23:47:24] [Rank 0] step:5901/10000 train_time:252864ms step_avg:42.85ms +[2025-09-05 23:47:24] [Rank 0] step:5921/10000 train_time:253601ms step_avg:42.83ms +[2025-09-05 23:47:24] [Rank 0] step:5921/10000 train_time:253601ms step_avg:42.83ms +[2025-09-05 23:47:25] [Rank 0] step:5941/10000 train_time:254337ms step_avg:42.81ms +[2025-09-05 23:47:25] [Rank 0] step:5941/10000 train_time:254337ms step_avg:42.81ms +[2025-09-05 23:47:26] [Rank 0] step:5961/10000 train_time:255074ms step_avg:42.79ms +[2025-09-05 23:47:26] [Rank 0] step:5961/10000 train_time:255074ms step_avg:42.79ms +[2025-09-05 23:47:27] [Rank 0] step:5981/10000 train_time:255810ms step_avg:42.77ms +[2025-09-05 23:47:27] [Rank 0] step:5981/10000 train_time:255810ms step_avg:42.77ms +[2025-09-05 23:47:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:47:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:47:28] [Rank 0] PRINT: step:6000/10000 train_loss:2.2982 val_loss:2.2747 train_time:256626ms step_avg:42.77ms +[2025-09-05 23:47:28] [Rank 0] PRINT: step:6000/10000 train_loss:2.2982 val_loss:2.2747 train_time:256626ms step_avg:42.77ms +[2025-09-05 23:47:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:47:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:47:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:47:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:48:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:48:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:48:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:48:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:48:49] [Rank 0] Total Loss: 4.7812 +[2025-09-05 23:48:49] [Rank 0] Total Loss: 4.7812 +[2025-09-05 23:48:49] [Rank 0] Total FTA (Unweighted): 0.2938 +[2025-09-05 23:48:49] [Rank 0] Total FTA (Unweighted): 0.2938 +[2025-09-05 23:48:49] [Rank 0] Total FTA (Weighted): 0.2938 +[2025-09-05 23:48:49] [Rank 0] Total FTA (Weighted): 0.2938 +[2025-09-05 23:48:49] [Rank 0] Group 0 Loss: 3.2858 +[2025-09-05 23:48:49] [Rank 0] Group 0 Loss: 3.2858 +[2025-09-05 23:48:49] [Rank 0] Group 1 Loss: 3.2966 +[2025-09-05 23:48:49] [Rank 0] Group 1 Loss: 3.2966 +[2025-09-05 23:48:49] [Rank 0] Group 2 Loss: 3.3380 +[2025-09-05 23:48:49] [Rank 0] Group 2 Loss: 3.3380 +[2025-09-05 23:48:49] [Rank 0] Group 3 Loss: 3.7568 +[2025-09-05 23:48:49] [Rank 0] Group 3 Loss: 3.7568 +[2025-09-05 23:48:49] [Rank 0] Group 4 Loss: 4.1669 +[2025-09-05 23:48:49] [Rank 0] Group 4 Loss: 4.1669 +[2025-09-05 23:48:49] [Rank 0] Group 5 Loss: 4.6160 +[2025-09-05 23:48:49] [Rank 0] Group 5 Loss: 4.6160 +[2025-09-05 23:48:49] [Rank 0] Group 6 Loss: 4.9284 +[2025-09-05 23:48:49] [Rank 0] Group 6 Loss: 4.9284 +[2025-09-05 23:48:49] [Rank 0] Group 7 Loss: 5.0759 +[2025-09-05 23:48:49] [Rank 0] Group 7 Loss: 5.0759 +[2025-09-05 23:48:49] [Rank 0] Group 8 Loss: 5.3724 +[2025-09-05 23:48:49] [Rank 0] Group 8 Loss: 5.3724 +[2025-09-05 23:48:49] [Rank 0] Group 9 Loss: 5.4716 +[2025-09-05 23:48:49] [Rank 0] Group 9 Loss: 5.4716 +[2025-09-05 23:48:49] [Rank 0] Group 10 Loss: 5.5423 +[2025-09-05 23:48:49] [Rank 0] Group 10 Loss: 5.5423 +[2025-09-05 23:48:49] [Rank 0] Group 11 Loss: 5.5904 +[2025-09-05 23:48:49] [Rank 0] Group 11 Loss: 5.5904 +[2025-09-05 23:48:49] [Rank 0] Group 12 Loss: 5.4939 +[2025-09-05 23:48:49] [Rank 0] Group 12 Loss: 5.4939 +[2025-09-05 23:48:49] [Rank 0] Group 13 Loss: 5.5124 +[2025-09-05 23:48:49] [Rank 0] Group 13 Loss: 5.5124 +[2025-09-05 23:48:49] [Rank 0] Group 14 Loss: 5.5504 +[2025-09-05 23:48:49] [Rank 0] Group 14 Loss: 5.5504 +[2025-09-05 23:48:49] [Rank 0] Group 15 Loss: 5.5011 +[2025-09-05 23:48:49] [Rank 0] Group 15 Loss: 5.5011 +[2025-09-05 23:48:49] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:48:49] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:48:49] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:48:49] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:48:49] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:48:49] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:48:49] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:48:49] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:48:49] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:48:49] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:48:49] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:48:49] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:48:49] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:48:49] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:48:49] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:48:49] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:48:49] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:48:49] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:48:49] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:48:49] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:48:49] [Rank 0] Group 10 FTA: 0.1900 +[2025-09-05 23:48:49] [Rank 0] Group 10 FTA: 0.1900 +[2025-09-05 23:48:49] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:48:49] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:48:49] [Rank 0] Group 12 FTA: 0.1700 +[2025-09-05 23:48:49] [Rank 0] Group 12 FTA: 0.1700 +[2025-09-05 23:48:49] [Rank 0] Group 13 FTA: 0.1900 +[2025-09-05 23:48:49] [Rank 0] Group 13 FTA: 0.1900 +[2025-09-05 23:48:49] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:48:49] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:48:49] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 23:48:49] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 23:48:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:48:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:48:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:48:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:48:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:48:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:48:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:48:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:48:51] [Rank 0] step:6001/10000 train_time:256636ms step_avg:42.77ms +[2025-09-05 23:48:51] [Rank 0] step:6001/10000 train_time:256636ms step_avg:42.77ms +[2025-09-05 23:48:52] [Rank 0] step:6021/10000 train_time:257504ms step_avg:42.77ms +[2025-09-05 23:48:52] [Rank 0] step:6021/10000 train_time:257504ms step_avg:42.77ms +[2025-09-05 23:48:52] [Rank 0] step:6041/10000 train_time:258241ms step_avg:42.75ms +[2025-09-05 23:48:52] [Rank 0] step:6041/10000 train_time:258241ms step_avg:42.75ms +[2025-09-05 23:48:53] [Rank 0] step:6061/10000 train_time:258976ms step_avg:42.73ms +[2025-09-05 23:48:53] [Rank 0] step:6061/10000 train_time:258976ms step_avg:42.73ms +[2025-09-05 23:48:54] [Rank 0] step:6081/10000 train_time:259713ms step_avg:42.71ms +[2025-09-05 23:48:54] [Rank 0] step:6081/10000 train_time:259713ms step_avg:42.71ms +[2025-09-05 23:48:55] [Rank 0] step:6101/10000 train_time:260448ms step_avg:42.69ms +[2025-09-05 23:48:55] [Rank 0] step:6101/10000 train_time:260448ms step_avg:42.69ms +[2025-09-05 23:48:55] [Rank 0] step:6121/10000 train_time:261185ms step_avg:42.67ms +[2025-09-05 
23:48:55] [Rank 0] step:6121/10000 train_time:261185ms step_avg:42.67ms +[2025-09-05 23:48:56] [Rank 0] step:6141/10000 train_time:261922ms step_avg:42.65ms +[2025-09-05 23:48:56] [Rank 0] step:6141/10000 train_time:261922ms step_avg:42.65ms +[2025-09-05 23:48:57] [Rank 0] step:6161/10000 train_time:262660ms step_avg:42.63ms +[2025-09-05 23:48:57] [Rank 0] step:6161/10000 train_time:262660ms step_avg:42.63ms +[2025-09-05 23:48:57] [Rank 0] step:6181/10000 train_time:263396ms step_avg:42.61ms +[2025-09-05 23:48:57] [Rank 0] step:6181/10000 train_time:263396ms step_avg:42.61ms +[2025-09-05 23:48:58] [Rank 0] step:6201/10000 train_time:264132ms step_avg:42.60ms +[2025-09-05 23:48:58] [Rank 0] step:6201/10000 train_time:264132ms step_avg:42.60ms +[2025-09-05 23:48:59] [Rank 0] step:6221/10000 train_time:264868ms step_avg:42.58ms +[2025-09-05 23:48:59] [Rank 0] step:6221/10000 train_time:264868ms step_avg:42.58ms +[2025-09-05 23:49:00] [Rank 0] step:6241/10000 train_time:265605ms step_avg:42.56ms +[2025-09-05 23:49:00] [Rank 0] step:6241/10000 train_time:265605ms step_avg:42.56ms +[2025-09-05 23:49:00] [Rank 0] step:6261/10000 train_time:266341ms step_avg:42.54ms +[2025-09-05 23:49:00] [Rank 0] step:6261/10000 train_time:266341ms step_avg:42.54ms +[2025-09-05 23:49:01] [Rank 0] step:6281/10000 train_time:267077ms step_avg:42.52ms +[2025-09-05 23:49:01] [Rank 0] step:6281/10000 train_time:267077ms step_avg:42.52ms +[2025-09-05 23:49:02] [Rank 0] step:6301/10000 train_time:267813ms step_avg:42.50ms +[2025-09-05 23:49:02] [Rank 0] step:6301/10000 train_time:267813ms step_avg:42.50ms +[2025-09-05 23:49:03] [Rank 0] step:6321/10000 train_time:268549ms step_avg:42.49ms +[2025-09-05 23:49:03] [Rank 0] step:6321/10000 train_time:268549ms step_avg:42.49ms +[2025-09-05 23:49:03] [Rank 0] step:6341/10000 train_time:269286ms step_avg:42.47ms +[2025-09-05 23:49:03] [Rank 0] step:6341/10000 train_time:269286ms step_avg:42.47ms +[2025-09-05 23:49:04] [Rank 0] step:6361/10000 train_time:270022ms step_avg:42.45ms +[2025-09-05 23:49:04] [Rank 0] step:6361/10000 train_time:270022ms step_avg:42.45ms +[2025-09-05 23:49:05] [Rank 0] step:6381/10000 train_time:270758ms step_avg:42.43ms +[2025-09-05 23:49:05] [Rank 0] step:6381/10000 train_time:270758ms step_avg:42.43ms +[2025-09-05 23:49:06] [Rank 0] step:6401/10000 train_time:271494ms step_avg:42.41ms +[2025-09-05 23:49:06] [Rank 0] step:6401/10000 train_time:271494ms step_avg:42.41ms +[2025-09-05 23:49:06] [Rank 0] step:6421/10000 train_time:272230ms step_avg:42.40ms +[2025-09-05 23:49:06] [Rank 0] step:6421/10000 train_time:272230ms step_avg:42.40ms +[2025-09-05 23:49:07] [Rank 0] step:6441/10000 train_time:272966ms step_avg:42.38ms +[2025-09-05 23:49:07] [Rank 0] step:6441/10000 train_time:272966ms step_avg:42.38ms +[2025-09-05 23:49:08] [Rank 0] step:6461/10000 train_time:273703ms step_avg:42.36ms +[2025-09-05 23:49:08] [Rank 0] step:6461/10000 train_time:273703ms step_avg:42.36ms +[2025-09-05 23:49:09] [Rank 0] step:6481/10000 train_time:274438ms step_avg:42.35ms +[2025-09-05 23:49:09] [Rank 0] step:6481/10000 train_time:274438ms step_avg:42.35ms +[2025-09-05 23:49:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:49:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:49:10] [Rank 0] PRINT: step:6500/10000 train_loss:2.2709 val_loss:2.2493 train_time:275255ms step_avg:42.35ms +[2025-09-05 23:49:10] [Rank 0] PRINT: step:6500/10000 train_loss:2.2709 val_loss:2.2493 train_time:275255ms step_avg:42.35ms +[2025-09-05 23:49:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:49:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:49:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:49:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:50:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:50:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:50:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:50:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:50:32] [Rank 0] Total Loss: 4.7375 +[2025-09-05 23:50:32] [Rank 0] Total Loss: 4.7375 +[2025-09-05 23:50:32] [Rank 0] Total FTA (Unweighted): 0.2962 +[2025-09-05 23:50:32] [Rank 0] Total FTA (Unweighted): 0.2962 +[2025-09-05 23:50:32] [Rank 0] Total FTA (Weighted): 0.2963 +[2025-09-05 23:50:32] [Rank 0] Total FTA (Weighted): 0.2963 +[2025-09-05 23:50:32] [Rank 0] Group 0 Loss: 3.4114 +[2025-09-05 23:50:32] [Rank 0] Group 0 Loss: 3.4114 +[2025-09-05 23:50:32] [Rank 0] Group 1 Loss: 3.2568 +[2025-09-05 23:50:32] [Rank 0] Group 1 Loss: 3.2568 +[2025-09-05 23:50:32] [Rank 0] Group 2 Loss: 3.3406 +[2025-09-05 23:50:32] [Rank 0] Group 2 Loss: 3.3406 +[2025-09-05 23:50:32] [Rank 0] Group 3 Loss: 3.7314 +[2025-09-05 23:50:32] [Rank 0] Group 3 Loss: 3.7314 +[2025-09-05 23:50:32] [Rank 0] Group 4 Loss: 4.1310 +[2025-09-05 23:50:32] [Rank 0] Group 4 Loss: 4.1310 +[2025-09-05 23:50:32] [Rank 0] Group 5 Loss: 4.5587 +[2025-09-05 23:50:32] [Rank 0] Group 5 Loss: 4.5587 +[2025-09-05 23:50:32] [Rank 0] Group 6 Loss: 4.8595 +[2025-09-05 23:50:32] [Rank 0] Group 6 Loss: 4.8595 +[2025-09-05 23:50:32] [Rank 0] Group 7 Loss: 5.0041 +[2025-09-05 23:50:32] [Rank 0] Group 7 Loss: 5.0041 +[2025-09-05 23:50:32] [Rank 0] Group 8 Loss: 5.2953 +[2025-09-05 23:50:32] [Rank 0] Group 8 Loss: 5.2953 +[2025-09-05 23:50:32] [Rank 0] Group 9 Loss: 5.4104 +[2025-09-05 23:50:32] [Rank 0] Group 9 Loss: 5.4104 +[2025-09-05 23:50:32] [Rank 0] Group 10 Loss: 5.4668 +[2025-09-05 23:50:32] [Rank 0] Group 10 Loss: 5.4668 +[2025-09-05 23:50:32] [Rank 0] Group 11 Loss: 5.5058 +[2025-09-05 23:50:32] [Rank 0] Group 11 Loss: 5.5058 +[2025-09-05 23:50:32] [Rank 0] Group 12 Loss: 5.4301 +[2025-09-05 23:50:32] [Rank 0] Group 12 Loss: 5.4301 +[2025-09-05 23:50:32] [Rank 0] Group 13 Loss: 5.4607 +[2025-09-05 23:50:32] [Rank 0] Group 13 Loss: 5.4607 +[2025-09-05 23:50:32] [Rank 0] Group 14 Loss: 5.4938 +[2025-09-05 23:50:32] [Rank 0] Group 14 Loss: 5.4938 +[2025-09-05 23:50:32] [Rank 0] Group 15 Loss: 5.4440 +[2025-09-05 23:50:32] [Rank 0] Group 15 Loss: 5.4440 +[2025-09-05 23:50:32] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:50:32] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:50:32] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:50:32] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:50:32] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:50:32] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:50:32] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:50:32] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:50:32] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:50:32] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:50:32] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:50:32] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:50:32] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:50:32] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:50:32] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:50:32] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:50:32] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:50:32] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:50:32] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:50:32] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:50:32] [Rank 0] Group 10 FTA: 0.1900 +[2025-09-05 23:50:32] [Rank 0] Group 10 FTA: 0.1900 +[2025-09-05 23:50:32] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:50:32] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:50:32] [Rank 0] Group 12 FTA: 0.1600 +[2025-09-05 23:50:32] [Rank 0] Group 12 FTA: 0.1600 +[2025-09-05 23:50:32] [Rank 0] Group 13 FTA: 0.1800 +[2025-09-05 23:50:32] [Rank 0] Group 13 FTA: 0.1800 +[2025-09-05 23:50:32] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 23:50:32] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 23:50:32] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 23:50:32] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 23:50:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:50:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:50:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:50:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:50:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:50:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:50:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:50:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:50:34] [Rank 0] step:6501/10000 train_time:275265ms step_avg:42.34ms +[2025-09-05 23:50:34] [Rank 0] step:6501/10000 train_time:275265ms step_avg:42.34ms +[2025-09-05 23:50:34] [Rank 0] step:6521/10000 train_time:275931ms step_avg:42.31ms +[2025-09-05 23:50:34] [Rank 0] step:6521/10000 train_time:275931ms step_avg:42.31ms +[2025-09-05 23:50:35] [Rank 0] step:6541/10000 train_time:276667ms step_avg:42.30ms +[2025-09-05 23:50:35] [Rank 0] step:6541/10000 train_time:276667ms step_avg:42.30ms +[2025-09-05 23:50:36] [Rank 0] step:6561/10000 train_time:277404ms step_avg:42.28ms +[2025-09-05 23:50:36] [Rank 0] step:6561/10000 train_time:277404ms step_avg:42.28ms +[2025-09-05 23:50:37] [Rank 0] step:6581/10000 train_time:278141ms step_avg:42.26ms +[2025-09-05 23:50:37] [Rank 0] step:6581/10000 train_time:278141ms step_avg:42.26ms +[2025-09-05 23:50:37] [Rank 0] step:6601/10000 train_time:278877ms step_avg:42.25ms +[2025-09-05 23:50:37] [Rank 0] step:6601/10000 train_time:278877ms step_avg:42.25ms +[2025-09-05 23:50:38] [Rank 0] step:6621/10000 train_time:279613ms step_avg:42.23ms +[2025-09-05 
23:50:38] [Rank 0] step:6621/10000 train_time:279613ms step_avg:42.23ms +[2025-09-05 23:50:39] [Rank 0] step:6641/10000 train_time:280349ms step_avg:42.21ms +[2025-09-05 23:50:39] [Rank 0] step:6641/10000 train_time:280349ms step_avg:42.21ms +[2025-09-05 23:50:40] [Rank 0] step:6661/10000 train_time:281085ms step_avg:42.20ms +[2025-09-05 23:50:40] [Rank 0] step:6661/10000 train_time:281085ms step_avg:42.20ms +[2025-09-05 23:50:40] [Rank 0] step:6681/10000 train_time:281822ms step_avg:42.18ms +[2025-09-05 23:50:40] [Rank 0] step:6681/10000 train_time:281822ms step_avg:42.18ms +[2025-09-05 23:50:41] [Rank 0] step:6701/10000 train_time:282558ms step_avg:42.17ms +[2025-09-05 23:50:41] [Rank 0] step:6701/10000 train_time:282558ms step_avg:42.17ms +[2025-09-05 23:50:42] [Rank 0] step:6721/10000 train_time:283294ms step_avg:42.15ms +[2025-09-05 23:50:42] [Rank 0] step:6721/10000 train_time:283294ms step_avg:42.15ms +[2025-09-05 23:50:42] [Rank 0] step:6741/10000 train_time:284031ms step_avg:42.13ms +[2025-09-05 23:50:42] [Rank 0] step:6741/10000 train_time:284031ms step_avg:42.13ms +[2025-09-05 23:50:43] [Rank 0] step:6761/10000 train_time:284767ms step_avg:42.12ms +[2025-09-05 23:50:43] [Rank 0] step:6761/10000 train_time:284767ms step_avg:42.12ms +[2025-09-05 23:50:44] [Rank 0] step:6781/10000 train_time:285504ms step_avg:42.10ms +[2025-09-05 23:50:44] [Rank 0] step:6781/10000 train_time:285504ms step_avg:42.10ms +[2025-09-05 23:50:45] [Rank 0] step:6801/10000 train_time:286240ms step_avg:42.09ms +[2025-09-05 23:50:45] [Rank 0] step:6801/10000 train_time:286240ms step_avg:42.09ms +[2025-09-05 23:50:45] [Rank 0] step:6821/10000 train_time:286976ms step_avg:42.07ms +[2025-09-05 23:50:45] [Rank 0] step:6821/10000 train_time:286976ms step_avg:42.07ms +[2025-09-05 23:50:47] [Rank 0] step:6841/10000 train_time:288344ms step_avg:42.15ms +[2025-09-05 23:50:47] [Rank 0] step:6841/10000 train_time:288344ms step_avg:42.15ms +[2025-09-05 23:50:48] [Rank 0] step:6861/10000 train_time:289081ms step_avg:42.13ms +[2025-09-05 23:50:48] [Rank 0] step:6861/10000 train_time:289081ms step_avg:42.13ms +[2025-09-05 23:50:48] [Rank 0] step:6881/10000 train_time:289817ms step_avg:42.12ms +[2025-09-05 23:50:48] [Rank 0] step:6881/10000 train_time:289817ms step_avg:42.12ms +[2025-09-05 23:50:49] [Rank 0] step:6901/10000 train_time:290552ms step_avg:42.10ms +[2025-09-05 23:50:49] [Rank 0] step:6901/10000 train_time:290552ms step_avg:42.10ms +[2025-09-05 23:50:50] [Rank 0] step:6921/10000 train_time:291290ms step_avg:42.09ms +[2025-09-05 23:50:50] [Rank 0] step:6921/10000 train_time:291290ms step_avg:42.09ms +[2025-09-05 23:50:50] [Rank 0] step:6941/10000 train_time:292026ms step_avg:42.07ms +[2025-09-05 23:50:50] [Rank 0] step:6941/10000 train_time:292026ms step_avg:42.07ms +[2025-09-05 23:50:51] [Rank 0] step:6961/10000 train_time:292762ms step_avg:42.06ms +[2025-09-05 23:50:51] [Rank 0] step:6961/10000 train_time:292762ms step_avg:42.06ms +[2025-09-05 23:50:52] [Rank 0] step:6981/10000 train_time:293498ms step_avg:42.04ms +[2025-09-05 23:50:52] [Rank 0] step:6981/10000 train_time:293498ms step_avg:42.04ms +[2025-09-05 23:50:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:50:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:50:53] [Rank 0] PRINT: step:7000/10000 train_loss:2.2467 val_loss:2.2285 train_time:294314ms step_avg:42.04ms +[2025-09-05 23:50:53] [Rank 0] PRINT: step:7000/10000 train_loss:2.2467 val_loss:2.2285 train_time:294314ms step_avg:42.04ms +[2025-09-05 23:50:53] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:50:53] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:50:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:50:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:52:14] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:52:14] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:52:14] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:52:14] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:52:14] [Rank 0] Total Loss: 4.7131 +[2025-09-05 23:52:14] [Rank 0] Total Loss: 4.7131 +[2025-09-05 23:52:14] [Rank 0] Total FTA (Unweighted): 0.2938 +[2025-09-05 23:52:14] [Rank 0] Total FTA (Unweighted): 0.2938 +[2025-09-05 23:52:14] [Rank 0] Total FTA (Weighted): 0.2938 +[2025-09-05 23:52:14] [Rank 0] Total FTA (Weighted): 0.2938 +[2025-09-05 23:52:14] [Rank 0] Group 0 Loss: 3.2622 +[2025-09-05 23:52:14] [Rank 0] Group 0 Loss: 3.2622 +[2025-09-05 23:52:14] [Rank 0] Group 1 Loss: 3.2544 +[2025-09-05 23:52:14] [Rank 0] Group 1 Loss: 3.2544 +[2025-09-05 23:52:14] [Rank 0] Group 2 Loss: 3.3183 +[2025-09-05 23:52:14] [Rank 0] Group 2 Loss: 3.3183 +[2025-09-05 23:52:14] [Rank 0] Group 3 Loss: 3.7130 +[2025-09-05 23:52:14] [Rank 0] Group 3 Loss: 3.7130 +[2025-09-05 23:52:14] [Rank 0] Group 4 Loss: 4.0863 +[2025-09-05 23:52:14] [Rank 0] Group 4 Loss: 4.0863 +[2025-09-05 23:52:14] [Rank 0] Group 5 Loss: 4.5408 +[2025-09-05 23:52:14] [Rank 0] Group 5 Loss: 4.5408 +[2025-09-05 23:52:14] [Rank 0] Group 6 Loss: 4.8494 +[2025-09-05 23:52:14] [Rank 0] Group 6 Loss: 4.8494 +[2025-09-05 23:52:14] [Rank 0] Group 7 Loss: 4.9999 +[2025-09-05 23:52:14] [Rank 0] Group 7 Loss: 4.9999 +[2025-09-05 23:52:14] [Rank 0] Group 8 Loss: 5.2785 +[2025-09-05 23:52:14] [Rank 0] Group 8 Loss: 5.2785 +[2025-09-05 23:52:14] [Rank 0] Group 9 Loss: 5.3852 +[2025-09-05 23:52:14] [Rank 0] Group 9 Loss: 5.3852 +[2025-09-05 23:52:14] [Rank 0] Group 10 Loss: 5.4591 +[2025-09-05 23:52:14] [Rank 0] Group 10 Loss: 5.4591 +[2025-09-05 23:52:14] [Rank 0] Group 11 Loss: 5.5041 +[2025-09-05 23:52:14] [Rank 0] Group 11 Loss: 5.5041 +[2025-09-05 23:52:14] [Rank 0] Group 12 Loss: 5.4234 +[2025-09-05 23:52:14] [Rank 0] Group 12 Loss: 5.4234 +[2025-09-05 23:52:14] [Rank 0] Group 13 Loss: 5.4382 +[2025-09-05 23:52:14] [Rank 0] Group 13 Loss: 5.4382 +[2025-09-05 23:52:14] [Rank 0] Group 14 Loss: 5.4727 +[2025-09-05 23:52:14] [Rank 0] Group 14 Loss: 5.4727 +[2025-09-05 23:52:14] [Rank 0] Group 15 Loss: 5.4244 +[2025-09-05 23:52:14] [Rank 0] Group 15 Loss: 5.4244 +[2025-09-05 23:52:14] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:52:14] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:52:14] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:52:14] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:52:14] [Rank 0] Group 2 FTA: 0.3700 +[2025-09-05 23:52:14] [Rank 0] Group 2 FTA: 0.3700 +[2025-09-05 23:52:14] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:52:14] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:52:14] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:52:14] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:52:14] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:52:14] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:52:14] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:52:14] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:52:14] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:52:14] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:52:14] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:52:14] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:52:14] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 23:52:14] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 23:52:14] [Rank 0] Group 10 FTA: 0.1800 +[2025-09-05 23:52:14] [Rank 0] Group 10 FTA: 0.1800 +[2025-09-05 23:52:14] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:52:14] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:52:14] [Rank 0] Group 12 FTA: 0.1500 +[2025-09-05 23:52:14] [Rank 0] Group 12 FTA: 0.1500 +[2025-09-05 23:52:14] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 23:52:14] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 23:52:14] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 23:52:14] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 23:52:14] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 23:52:14] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 23:52:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:52:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:52:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:52:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:52:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:52:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:52:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:52:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:52:16] [Rank 0] step:7001/10000 train_time:294324ms step_avg:42.04ms +[2025-09-05 23:52:16] [Rank 0] step:7001/10000 train_time:294324ms step_avg:42.04ms +[2025-09-05 23:52:16] [Rank 0] step:7021/10000 train_time:294987ms step_avg:42.01ms +[2025-09-05 23:52:16] [Rank 0] step:7021/10000 train_time:294987ms step_avg:42.01ms +[2025-09-05 23:52:17] [Rank 0] step:7041/10000 train_time:295723ms step_avg:42.00ms +[2025-09-05 23:52:17] [Rank 0] step:7041/10000 train_time:295723ms step_avg:42.00ms +[2025-09-05 23:52:18] [Rank 0] step:7061/10000 train_time:296502ms step_avg:41.99ms +[2025-09-05 23:52:18] [Rank 0] step:7061/10000 train_time:296502ms step_avg:41.99ms +[2025-09-05 23:52:19] [Rank 0] step:7081/10000 train_time:297239ms step_avg:41.98ms +[2025-09-05 23:52:19] [Rank 0] step:7081/10000 train_time:297239ms step_avg:41.98ms +[2025-09-05 23:52:19] [Rank 0] step:7101/10000 train_time:297977ms step_avg:41.96ms +[2025-09-05 23:52:19] [Rank 0] step:7101/10000 train_time:297977ms step_avg:41.96ms +[2025-09-05 23:52:20] [Rank 0] step:7121/10000 train_time:298713ms step_avg:41.95ms +[2025-09-05 
23:52:20] [Rank 0] step:7121/10000 train_time:298713ms step_avg:41.95ms +[2025-09-05 23:52:21] [Rank 0] step:7141/10000 train_time:299449ms step_avg:41.93ms +[2025-09-05 23:52:21] [Rank 0] step:7141/10000 train_time:299449ms step_avg:41.93ms +[2025-09-05 23:52:22] [Rank 0] step:7161/10000 train_time:300185ms step_avg:41.92ms +[2025-09-05 23:52:22] [Rank 0] step:7161/10000 train_time:300185ms step_avg:41.92ms +[2025-09-05 23:52:22] [Rank 0] step:7181/10000 train_time:300921ms step_avg:41.91ms +[2025-09-05 23:52:22] [Rank 0] step:7181/10000 train_time:300921ms step_avg:41.91ms +[2025-09-05 23:52:23] [Rank 0] step:7201/10000 train_time:301657ms step_avg:41.89ms +[2025-09-05 23:52:23] [Rank 0] step:7201/10000 train_time:301657ms step_avg:41.89ms +[2025-09-05 23:52:24] [Rank 0] step:7221/10000 train_time:302392ms step_avg:41.88ms +[2025-09-05 23:52:24] [Rank 0] step:7221/10000 train_time:302392ms step_avg:41.88ms +[2025-09-05 23:52:25] [Rank 0] step:7241/10000 train_time:303128ms step_avg:41.86ms +[2025-09-05 23:52:25] [Rank 0] step:7241/10000 train_time:303128ms step_avg:41.86ms +[2025-09-05 23:52:25] [Rank 0] step:7261/10000 train_time:303864ms step_avg:41.85ms +[2025-09-05 23:52:25] [Rank 0] step:7261/10000 train_time:303864ms step_avg:41.85ms +[2025-09-05 23:52:26] [Rank 0] step:7281/10000 train_time:304600ms step_avg:41.83ms +[2025-09-05 23:52:26] [Rank 0] step:7281/10000 train_time:304600ms step_avg:41.83ms +[2025-09-05 23:52:27] [Rank 0] step:7301/10000 train_time:305336ms step_avg:41.82ms +[2025-09-05 23:52:27] [Rank 0] step:7301/10000 train_time:305336ms step_avg:41.82ms +[2025-09-05 23:52:28] [Rank 0] step:7321/10000 train_time:306073ms step_avg:41.81ms +[2025-09-05 23:52:28] [Rank 0] step:7321/10000 train_time:306073ms step_avg:41.81ms +[2025-09-05 23:52:28] [Rank 0] step:7341/10000 train_time:306810ms step_avg:41.79ms +[2025-09-05 23:52:28] [Rank 0] step:7341/10000 train_time:306810ms step_avg:41.79ms +[2025-09-05 23:52:29] [Rank 0] step:7361/10000 train_time:307545ms step_avg:41.78ms +[2025-09-05 23:52:29] [Rank 0] step:7361/10000 train_time:307545ms step_avg:41.78ms +[2025-09-05 23:52:30] [Rank 0] step:7381/10000 train_time:308281ms step_avg:41.77ms +[2025-09-05 23:52:30] [Rank 0] step:7381/10000 train_time:308281ms step_avg:41.77ms +[2025-09-05 23:52:31] [Rank 0] step:7401/10000 train_time:309017ms step_avg:41.75ms +[2025-09-05 23:52:31] [Rank 0] step:7401/10000 train_time:309017ms step_avg:41.75ms +[2025-09-05 23:52:31] [Rank 0] step:7421/10000 train_time:309753ms step_avg:41.74ms +[2025-09-05 23:52:31] [Rank 0] step:7421/10000 train_time:309753ms step_avg:41.74ms +[2025-09-05 23:52:32] [Rank 0] step:7441/10000 train_time:310489ms step_avg:41.73ms +[2025-09-05 23:52:32] [Rank 0] step:7441/10000 train_time:310489ms step_avg:41.73ms +[2025-09-05 23:52:33] [Rank 0] step:7461/10000 train_time:311386ms step_avg:41.74ms +[2025-09-05 23:52:33] [Rank 0] step:7461/10000 train_time:311386ms step_avg:41.74ms +[2025-09-05 23:52:34] [Rank 0] step:7481/10000 train_time:312122ms step_avg:41.72ms +[2025-09-05 23:52:34] [Rank 0] step:7481/10000 train_time:312122ms step_avg:41.72ms +[2025-09-05 23:52:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:52:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:52:35] [Rank 0] PRINT: step:7500/10000 train_loss:2.2264 val_loss:2.2095 train_time:312939ms step_avg:41.73ms +[2025-09-05 23:52:35] [Rank 0] PRINT: step:7500/10000 train_loss:2.2264 val_loss:2.2095 train_time:312939ms step_avg:41.73ms +[2025-09-05 23:52:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:52:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:52:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:52:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:53:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:53:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:53:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:53:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:53:56] [Rank 0] Total Loss: 4.7036 +[2025-09-05 23:53:56] [Rank 0] Total Loss: 4.7036 +[2025-09-05 23:53:56] [Rank 0] Total FTA (Unweighted): 0.3006 +[2025-09-05 23:53:56] [Rank 0] Total FTA (Unweighted): 0.3006 +[2025-09-05 23:53:56] [Rank 0] Total FTA (Weighted): 0.3006 +[2025-09-05 23:53:56] [Rank 0] Total FTA (Weighted): 0.3006 +[2025-09-05 23:53:56] [Rank 0] Group 0 Loss: 3.3926 +[2025-09-05 23:53:56] [Rank 0] Group 0 Loss: 3.3926 +[2025-09-05 23:53:56] [Rank 0] Group 1 Loss: 3.2643 +[2025-09-05 23:53:56] [Rank 0] Group 1 Loss: 3.2643 +[2025-09-05 23:53:56] [Rank 0] Group 2 Loss: 3.2977 +[2025-09-05 23:53:56] [Rank 0] Group 2 Loss: 3.2977 +[2025-09-05 23:53:56] [Rank 0] Group 3 Loss: 3.7065 +[2025-09-05 23:53:56] [Rank 0] Group 3 Loss: 3.7065 +[2025-09-05 23:53:56] [Rank 0] Group 4 Loss: 4.0787 +[2025-09-05 23:53:56] [Rank 0] Group 4 Loss: 4.0787 +[2025-09-05 23:53:56] [Rank 0] Group 5 Loss: 4.5132 +[2025-09-05 23:53:56] [Rank 0] Group 5 Loss: 4.5132 +[2025-09-05 23:53:56] [Rank 0] Group 6 Loss: 4.8123 +[2025-09-05 23:53:56] [Rank 0] Group 6 Loss: 4.8123 +[2025-09-05 23:53:56] [Rank 0] Group 7 Loss: 4.9626 +[2025-09-05 23:53:56] [Rank 0] Group 7 Loss: 4.9626 +[2025-09-05 23:53:56] [Rank 0] Group 8 Loss: 5.2654 +[2025-09-05 23:53:56] [Rank 0] Group 8 Loss: 5.2654 +[2025-09-05 23:53:56] [Rank 0] Group 9 Loss: 5.3736 +[2025-09-05 23:53:56] [Rank 0] Group 9 Loss: 5.3736 +[2025-09-05 23:53:56] [Rank 0] Group 10 Loss: 5.4445 +[2025-09-05 23:53:56] [Rank 0] Group 10 Loss: 5.4445 +[2025-09-05 23:53:56] [Rank 0] Group 11 Loss: 5.4807 +[2025-09-05 23:53:56] [Rank 0] Group 11 Loss: 5.4807 +[2025-09-05 23:53:56] [Rank 0] Group 12 Loss: 5.3890 +[2025-09-05 23:53:56] [Rank 0] Group 12 Loss: 5.3890 +[2025-09-05 23:53:56] [Rank 0] Group 13 Loss: 5.4232 +[2025-09-05 23:53:56] [Rank 0] Group 13 Loss: 5.4232 +[2025-09-05 23:53:56] [Rank 0] Group 14 Loss: 5.4418 +[2025-09-05 23:53:56] [Rank 0] Group 14 Loss: 5.4418 +[2025-09-05 23:53:56] [Rank 0] Group 15 Loss: 5.4118 +[2025-09-05 23:53:56] [Rank 0] Group 15 Loss: 5.4118 +[2025-09-05 23:53:56] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:53:56] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:53:56] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:53:56] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:53:56] [Rank 0] Group 2 FTA: 0.3700 +[2025-09-05 23:53:56] [Rank 0] Group 2 FTA: 0.3700 +[2025-09-05 23:53:56] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:53:56] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:53:56] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:53:56] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:53:56] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:53:56] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:53:56] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:53:56] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:53:56] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:53:56] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:53:56] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:53:56] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:53:56] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:53:56] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:53:56] [Rank 0] Group 10 FTA: 0.1900 +[2025-09-05 23:53:56] [Rank 0] Group 10 FTA: 0.1900 +[2025-09-05 23:53:56] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:53:56] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:53:56] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-05 23:53:56] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-05 23:53:56] [Rank 0] Group 13 FTA: 0.1900 +[2025-09-05 23:53:56] [Rank 0] Group 13 FTA: 0.1900 +[2025-09-05 23:53:56] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 23:53:56] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 23:53:56] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 23:53:56] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 23:53:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:53:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:53:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:53:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:53:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:53:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:53:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:53:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:53:58] [Rank 0] step:7501/10000 train_time:312949ms step_avg:41.72ms +[2025-09-05 23:53:58] [Rank 0] step:7501/10000 train_time:312949ms step_avg:41.72ms +[2025-09-05 23:53:58] [Rank 0] step:7521/10000 train_time:313628ms step_avg:41.70ms +[2025-09-05 23:53:58] [Rank 0] step:7521/10000 train_time:313628ms step_avg:41.70ms +[2025-09-05 23:53:59] [Rank 0] step:7541/10000 train_time:314364ms step_avg:41.69ms +[2025-09-05 23:53:59] [Rank 0] step:7541/10000 train_time:314364ms step_avg:41.69ms +[2025-09-05 23:54:00] [Rank 0] step:7561/10000 train_time:315099ms step_avg:41.67ms +[2025-09-05 23:54:00] [Rank 0] step:7561/10000 train_time:315099ms step_avg:41.67ms +[2025-09-05 23:54:01] [Rank 0] step:7581/10000 train_time:315835ms step_avg:41.66ms +[2025-09-05 23:54:01] [Rank 0] step:7581/10000 train_time:315835ms step_avg:41.66ms +[2025-09-05 23:54:01] [Rank 0] step:7601/10000 train_time:316571ms step_avg:41.65ms +[2025-09-05 23:54:01] [Rank 0] step:7601/10000 train_time:316571ms step_avg:41.65ms +[2025-09-05 23:54:02] [Rank 0] step:7621/10000 train_time:317307ms step_avg:41.64ms +[2025-09-05 
23:54:02] [Rank 0] step:7621/10000 train_time:317307ms step_avg:41.64ms +[2025-09-05 23:54:03] [Rank 0] step:7641/10000 train_time:318267ms step_avg:41.65ms +[2025-09-05 23:54:03] [Rank 0] step:7641/10000 train_time:318267ms step_avg:41.65ms +[2025-09-05 23:54:04] [Rank 0] step:7661/10000 train_time:319388ms step_avg:41.69ms +[2025-09-05 23:54:04] [Rank 0] step:7661/10000 train_time:319388ms step_avg:41.69ms +[2025-09-05 23:54:05] [Rank 0] step:7681/10000 train_time:320124ms step_avg:41.68ms +[2025-09-05 23:54:05] [Rank 0] step:7681/10000 train_time:320124ms step_avg:41.68ms +[2025-09-05 23:54:06] [Rank 0] step:7701/10000 train_time:320860ms step_avg:41.66ms +[2025-09-05 23:54:06] [Rank 0] step:7701/10000 train_time:320860ms step_avg:41.66ms +[2025-09-05 23:54:06] [Rank 0] step:7721/10000 train_time:321596ms step_avg:41.65ms +[2025-09-05 23:54:06] [Rank 0] step:7721/10000 train_time:321596ms step_avg:41.65ms +[2025-09-05 23:54:07] [Rank 0] step:7741/10000 train_time:322332ms step_avg:41.64ms +[2025-09-05 23:54:07] [Rank 0] step:7741/10000 train_time:322332ms step_avg:41.64ms +[2025-09-05 23:54:08] [Rank 0] step:7761/10000 train_time:323069ms step_avg:41.63ms +[2025-09-05 23:54:08] [Rank 0] step:7761/10000 train_time:323069ms step_avg:41.63ms +[2025-09-05 23:54:08] [Rank 0] step:7781/10000 train_time:323806ms step_avg:41.61ms +[2025-09-05 23:54:08] [Rank 0] step:7781/10000 train_time:323806ms step_avg:41.61ms +[2025-09-05 23:54:09] [Rank 0] step:7801/10000 train_time:324542ms step_avg:41.60ms +[2025-09-05 23:54:09] [Rank 0] step:7801/10000 train_time:324542ms step_avg:41.60ms +[2025-09-05 23:54:10] [Rank 0] step:7821/10000 train_time:325278ms step_avg:41.59ms +[2025-09-05 23:54:10] [Rank 0] step:7821/10000 train_time:325278ms step_avg:41.59ms +[2025-09-05 23:54:11] [Rank 0] step:7841/10000 train_time:326014ms step_avg:41.58ms +[2025-09-05 23:54:11] [Rank 0] step:7841/10000 train_time:326014ms step_avg:41.58ms +[2025-09-05 23:54:11] [Rank 0] step:7861/10000 train_time:326750ms step_avg:41.57ms +[2025-09-05 23:54:11] [Rank 0] step:7861/10000 train_time:326750ms step_avg:41.57ms +[2025-09-05 23:54:12] [Rank 0] step:7881/10000 train_time:327486ms step_avg:41.55ms +[2025-09-05 23:54:12] [Rank 0] step:7881/10000 train_time:327486ms step_avg:41.55ms +[2025-09-05 23:54:13] [Rank 0] step:7901/10000 train_time:328222ms step_avg:41.54ms +[2025-09-05 23:54:13] [Rank 0] step:7901/10000 train_time:328222ms step_avg:41.54ms +[2025-09-05 23:54:14] [Rank 0] step:7921/10000 train_time:328959ms step_avg:41.53ms +[2025-09-05 23:54:14] [Rank 0] step:7921/10000 train_time:328959ms step_avg:41.53ms +[2025-09-05 23:54:14] [Rank 0] step:7941/10000 train_time:329694ms step_avg:41.52ms +[2025-09-05 23:54:14] [Rank 0] step:7941/10000 train_time:329694ms step_avg:41.52ms +[2025-09-05 23:54:15] [Rank 0] step:7961/10000 train_time:330430ms step_avg:41.51ms +[2025-09-05 23:54:15] [Rank 0] step:7961/10000 train_time:330430ms step_avg:41.51ms +[2025-09-05 23:54:16] [Rank 0] step:7981/10000 train_time:331167ms step_avg:41.49ms +[2025-09-05 23:54:16] [Rank 0] step:7981/10000 train_time:331167ms step_avg:41.49ms +[2025-09-05 23:54:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:54:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:54:17] [Rank 0] PRINT: step:8000/10000 train_loss:2.2098 val_loss:2.1931 train_time:331984ms step_avg:41.50ms +[2025-09-05 23:54:17] [Rank 0] PRINT: step:8000/10000 train_loss:2.2098 val_loss:2.1931 train_time:331984ms step_avg:41.50ms +[2025-09-05 23:54:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:54:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:54:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:54:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:55:39] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:55:39] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:55:39] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:55:39] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:55:39] [Rank 0] Total Loss: 4.7312 +[2025-09-05 23:55:39] [Rank 0] Total Loss: 4.7312 +[2025-09-05 23:55:39] [Rank 0] Total FTA (Unweighted): 0.3050 +[2025-09-05 23:55:39] [Rank 0] Total FTA (Unweighted): 0.3050 +[2025-09-05 23:55:39] [Rank 0] Total FTA (Weighted): 0.3050 +[2025-09-05 23:55:39] [Rank 0] Total FTA (Weighted): 0.3050 +[2025-09-05 23:55:39] [Rank 0] Group 0 Loss: 3.3586 +[2025-09-05 23:55:39] [Rank 0] Group 0 Loss: 3.3586 +[2025-09-05 23:55:39] [Rank 0] Group 1 Loss: 3.3719 +[2025-09-05 23:55:39] [Rank 0] Group 1 Loss: 3.3719 +[2025-09-05 23:55:39] [Rank 0] Group 2 Loss: 3.3459 +[2025-09-05 23:55:39] [Rank 0] Group 2 Loss: 3.3459 +[2025-09-05 23:55:39] [Rank 0] Group 3 Loss: 3.7308 +[2025-09-05 23:55:39] [Rank 0] Group 3 Loss: 3.7308 +[2025-09-05 23:55:39] [Rank 0] Group 4 Loss: 4.1068 +[2025-09-05 23:55:39] [Rank 0] Group 4 Loss: 4.1068 +[2025-09-05 23:55:39] [Rank 0] Group 5 Loss: 4.5263 +[2025-09-05 23:55:39] [Rank 0] Group 5 Loss: 4.5263 +[2025-09-05 23:55:39] [Rank 0] Group 6 Loss: 4.8523 +[2025-09-05 23:55:39] [Rank 0] Group 6 Loss: 4.8523 +[2025-09-05 23:55:39] [Rank 0] Group 7 Loss: 4.9828 +[2025-09-05 23:55:39] [Rank 0] Group 7 Loss: 4.9828 +[2025-09-05 23:55:39] [Rank 0] Group 8 Loss: 5.2850 +[2025-09-05 23:55:39] [Rank 0] Group 8 Loss: 5.2850 +[2025-09-05 23:55:39] [Rank 0] Group 9 Loss: 5.3917 +[2025-09-05 23:55:39] [Rank 0] Group 9 Loss: 5.3917 +[2025-09-05 23:55:39] [Rank 0] Group 10 Loss: 5.4601 +[2025-09-05 23:55:39] [Rank 0] Group 10 Loss: 5.4601 +[2025-09-05 23:55:39] [Rank 0] Group 11 Loss: 5.5145 +[2025-09-05 23:55:39] [Rank 0] Group 11 Loss: 5.5145 +[2025-09-05 23:55:39] [Rank 0] Group 12 Loss: 5.4187 +[2025-09-05 23:55:39] [Rank 0] Group 12 Loss: 5.4187 +[2025-09-05 23:55:39] [Rank 0] Group 13 Loss: 5.4398 +[2025-09-05 23:55:39] [Rank 0] Group 13 Loss: 5.4398 +[2025-09-05 23:55:39] [Rank 0] Group 14 Loss: 5.4790 +[2025-09-05 23:55:39] [Rank 0] Group 14 Loss: 5.4790 +[2025-09-05 23:55:39] [Rank 0] Group 15 Loss: 5.4349 +[2025-09-05 23:55:39] [Rank 0] Group 15 Loss: 5.4349 +[2025-09-05 23:55:39] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:55:39] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:55:39] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:55:39] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:55:39] [Rank 0] Group 2 FTA: 0.3700 +[2025-09-05 23:55:39] [Rank 0] Group 2 FTA: 0.3700 +[2025-09-05 23:55:39] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:55:39] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:55:39] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:55:39] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:55:39] [Rank 0] Group 5 FTA: 0.2600 +[2025-09-05 23:55:39] [Rank 0] Group 5 FTA: 
0.2600 +[2025-09-05 23:55:39] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:55:39] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:55:39] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:55:39] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:55:39] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:55:39] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:55:39] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:55:39] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:55:39] [Rank 0] Group 10 FTA: 0.1900 +[2025-09-05 23:55:39] [Rank 0] Group 10 FTA: 0.1900 +[2025-09-05 23:55:39] [Rank 0] Group 11 FTA: 0.1700 +[2025-09-05 23:55:39] [Rank 0] Group 11 FTA: 0.1700 +[2025-09-05 23:55:39] [Rank 0] Group 12 FTA: 0.1700 +[2025-09-05 23:55:39] [Rank 0] Group 12 FTA: 0.1700 +[2025-09-05 23:55:39] [Rank 0] Group 13 FTA: 0.2200 +[2025-09-05 23:55:39] [Rank 0] Group 13 FTA: 0.2200 +[2025-09-05 23:55:39] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 23:55:39] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 23:55:39] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 23:55:39] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 23:55:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:55:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png +[2025-09-05 23:55:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:55:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png +[2025-09-05 23:55:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:55:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png +[2025-09-05 23:55:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:55:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png +[2025-09-05 23:55:40] [Rank 0] step:8001/10000 train_time:331994ms step_avg:41.49ms +[2025-09-05 23:55:40] [Rank 0] step:8001/10000 train_time:331994ms step_avg:41.49ms +[2025-09-05 23:55:42] [Rank 0] step:8021/10000 train_time:333304ms step_avg:41.55ms +[2025-09-05 23:55:42] [Rank 0] step:8021/10000 train_time:333304ms step_avg:41.55ms +[2025-09-05 23:55:43] [Rank 0] step:8041/10000 train_time:334104ms step_avg:41.55ms +[2025-09-05 23:55:43] [Rank 0] step:8041/10000 train_time:334104ms step_avg:41.55ms +[2025-09-05 23:55:43] [Rank 0] step:8061/10000 train_time:334839ms step_avg:41.54ms +[2025-09-05 23:55:43] [Rank 0] step:8061/10000 train_time:334839ms step_avg:41.54ms +[2025-09-05 23:55:44] [Rank 0] step:8081/10000 train_time:335576ms step_avg:41.53ms +[2025-09-05 23:55:44] [Rank 0] step:8081/10000 train_time:335576ms step_avg:41.53ms +[2025-09-05 23:55:45] [Rank 0] step:8101/10000 train_time:336504ms step_avg:41.54ms +[2025-09-05 23:55:45] [Rank 0] step:8101/10000 train_time:336504ms step_avg:41.54ms +[2025-09-05 23:55:46] [Rank 0] step:8121/10000 train_time:337240ms step_avg:41.53ms +[2025-09-05 
+[2025-09-05 23:55:46] [Rank 0] step:8141/10000 train_time:337976ms step_avg:41.52ms
+[2025-09-05 23:55:47] [Rank 0] step:8161/10000 train_time:338713ms step_avg:41.50ms
+[2025-09-05 23:55:48] [Rank 0] step:8181/10000 train_time:339449ms step_avg:41.49ms
+[2025-09-05 23:55:49] [Rank 0] step:8201/10000 train_time:340185ms step_avg:41.48ms
+[2025-09-05 23:55:49] [Rank 0] step:8221/10000 train_time:340921ms step_avg:41.47ms
+[2025-09-05 23:55:50] [Rank 0] step:8241/10000 train_time:341658ms step_avg:41.46ms
+[2025-09-05 23:55:51] [Rank 0] step:8261/10000 train_time:342394ms step_avg:41.45ms
+[2025-09-05 23:55:52] [Rank 0] step:8281/10000 train_time:343130ms step_avg:41.44ms
+[2025-09-05 23:55:52] [Rank 0] step:8301/10000 train_time:343866ms step_avg:41.42ms
+[2025-09-05 23:55:53] [Rank 0] step:8321/10000 train_time:344602ms step_avg:41.41ms
+[2025-09-05 23:55:54] [Rank 0] step:8341/10000 train_time:345338ms step_avg:41.40ms
+[2025-09-05 23:55:55] [Rank 0] step:8361/10000 train_time:346074ms step_avg:41.39ms
+[2025-09-05 23:55:55] [Rank 0] step:8381/10000 train_time:346810ms step_avg:41.38ms
+[2025-09-05 23:55:56] [Rank 0] step:8401/10000 train_time:347546ms step_avg:41.37ms
+[2025-09-05 23:55:57] [Rank 0] step:8421/10000 train_time:348282ms step_avg:41.36ms
+[2025-09-05 23:55:58] [Rank 0] step:8441/10000 train_time:349017ms step_avg:41.35ms
+[2025-09-05 23:55:58] [Rank 0] step:8461/10000 train_time:349754ms step_avg:41.34ms
+[2025-09-05 23:55:59] [Rank 0] step:8481/10000 train_time:350491ms step_avg:41.33ms
+[2025-09-05 23:56:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
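The warning that closes each validation pass is plain integer arithmetic: val_tokens (491520, per the run's hyperparameters) is 7.5 batches of val_batch_size (65536), so a loop over whole batches consumes 7 * 65536 = 458752 tokens and the remaining 32768 go unevaluated. A minimal sketch of the check (variable names are assumptions, not necessarily the script's own):

val_tokens = 491520      # "val_tokens" from the hyperparameters in config.json
val_batch_size = 65536   # the batch size named in the warning

num_full_batches, remainder = divmod(val_tokens, val_batch_size)  # 7, 32768
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")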
+[2025-09-05 23:56:00] [Rank 0] PRINT: step:8500/10000 train_loss:2.1954 val_loss:2.1794 train_time:351307ms step_avg:41.33ms
+[2025-09-05 23:56:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:56:00] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:57:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:57:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:57:22] [Rank 0] Total Loss: 4.7081
+[2025-09-05 23:57:22] [Rank 0] Total FTA (Unweighted): 0.3081
+[2025-09-05 23:57:22] [Rank 0] Total FTA (Weighted): 0.3081
+[2025-09-05 23:57:22] [Rank 0] Group 0 Loss: 3.3830
+[2025-09-05 23:57:22] [Rank 0] Group 1 Loss: 3.2835
+[2025-09-05 23:57:22] [Rank 0] Group 2 Loss: 3.3422
+[2025-09-05 23:57:22] [Rank 0] Group 3 Loss: 3.7323
+[2025-09-05 23:57:22] [Rank 0] Group 4 Loss: 4.0775
+[2025-09-05 23:57:22] [Rank 0] Group 5 Loss: 4.5009
+[2025-09-05 23:57:22] [Rank 0] Group 6 Loss: 4.8186
+[2025-09-05 23:57:22] [Rank 0] Group 7 Loss: 4.9610
+[2025-09-05 23:57:22] [Rank 0] Group 8 Loss: 5.2608
+[2025-09-05 23:57:22] [Rank 0] Group 9 Loss: 5.3644
+[2025-09-05 23:57:22] [Rank 0] Group 10 Loss: 5.4413
+[2025-09-05 23:57:22] [Rank 0] Group 11 Loss: 5.4970
+[2025-09-05 23:57:22] [Rank 0] Group 12 Loss: 5.3987
+[2025-09-05 23:57:22] [Rank 0] Group 13 Loss: 5.4071
+[2025-09-05 23:57:22] [Rank 0] Group 14 Loss: 5.4528
+[2025-09-05 23:57:22] [Rank 0] Group 15 Loss: 5.4090
+[2025-09-05 23:57:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:57:22] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:57:22] [Rank 0] Group 2 FTA: 0.3700
+[2025-09-05 23:57:22] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:57:22] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:57:22] [Rank 0] Group 5 FTA: 0.2600
+[2025-09-05 23:57:22] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:57:22] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 23:57:22] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 23:57:22] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 23:57:22] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 23:57:22] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 23:57:22] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 23:57:22] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 23:57:22] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 23:57:22] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 23:57:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-05 23:57:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-05 23:57:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-05 23:57:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-05 23:57:23] [Rank 0] step:8501/10000 train_time:351317ms step_avg:41.33ms
+[2025-09-05 23:57:24] [Rank 0] step:8521/10000 train_time:351994ms step_avg:41.31ms
+[2025-09-05 23:57:25] [Rank 0] step:8541/10000 train_time:352730ms step_avg:41.30ms
+[2025-09-05 23:57:25] [Rank 0] step:8561/10000 train_time:353466ms step_avg:41.29ms
+[2025-09-05 23:57:26] [Rank 0] step:8581/10000 train_time:354202ms step_avg:41.28ms
+[2025-09-05 23:57:27] [Rank 0] step:8601/10000 train_time:354939ms step_avg:41.27ms
+[2025-09-05 23:57:28] [Rank 0] step:8621/10000 train_time:355674ms step_avg:41.26ms
+[2025-09-05 23:57:28] [Rank 0] step:8641/10000 train_time:356410ms step_avg:41.25ms
+[2025-09-05 23:57:29] [Rank 0] step:8661/10000 train_time:357145ms step_avg:41.24ms
+[2025-09-05 23:57:30] [Rank 0] step:8681/10000 train_time:357881ms step_avg:41.23ms
+[2025-09-05 23:57:31] [Rank 0] step:8701/10000 train_time:358618ms step_avg:41.22ms
+[2025-09-05 23:57:31] [Rank 0] step:8721/10000 train_time:359353ms step_avg:41.21ms
+[2025-09-05 23:57:32] [Rank 0] step:8741/10000 train_time:360089ms step_avg:41.20ms
+[2025-09-05 23:57:33] [Rank 0] step:8761/10000 train_time:360825ms step_avg:41.19ms
+[2025-09-05 23:57:34] [Rank 0] step:8781/10000 train_time:361561ms step_avg:41.18ms
+[2025-09-05 23:57:34] [Rank 0] step:8801/10000 train_time:362297ms step_avg:41.17ms
+[2025-09-05 23:57:35] [Rank 0] step:8821/10000 train_time:363033ms step_avg:41.16ms
+[2025-09-05 23:57:36] [Rank 0] step:8841/10000 train_time:364372ms step_avg:41.21ms
+[2025-09-05 23:57:37] [Rank 0] step:8861/10000 train_time:365108ms step_avg:41.20ms
+[2025-09-05 23:57:38] [Rank 0] step:8881/10000 train_time:365843ms step_avg:41.19ms
+[2025-09-05 23:57:39] [Rank 0] step:8901/10000 train_time:366579ms step_avg:41.18ms
+[2025-09-05 23:57:39] [Rank 0] step:8921/10000 train_time:367314ms step_avg:41.17ms
+[2025-09-05 23:57:40] [Rank 0] step:8941/10000 train_time:368049ms step_avg:41.16ms
+[2025-09-05 23:57:41] [Rank 0] step:8961/10000 train_time:368785ms step_avg:41.15ms
+[2025-09-05 23:57:41] [Rank 0] step:8981/10000 train_time:369521ms step_avg:41.14ms
+[2025-09-05 23:57:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
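In every detailed-evaluation block of this log, Total FTA (Unweighted) and Total FTA (Weighted) are identical. That is expected rather than a bug: the fixed eval set holds 1600 samples spread over 16 groups, and with per_group_k=100 in this run's CLI args each group contributes the same count, so the sample-weighted mean of per-group FTA collapses to the plain mean. A sketch reproducing the step-8500 totals from the group values (the equal-size assumption comes from per_group_k):

group_fta = [1.00, 1.00, 0.37, 0.17, 0.25, 0.26, 0.29, 0.13,
             0.22, 0.14, 0.20, 0.19, 0.19, 0.22, 0.17, 0.13]
group_sizes = [100] * 16  # 16 groups x 100 samples = 1600 fixed-eval samples

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
assert abs(unweighted - weighted) < 1e-12  # equal weights -> identical means
print(f"{unweighted:.4f}")  # 0.3081, matching both logged totals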
+[2025-09-05 23:57:43] [Rank 0] PRINT: step:9000/10000 train_loss:2.1822 val_loss:2.1686 train_time:370337ms step_avg:41.15ms
+[2025-09-05 23:57:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:57:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:59:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:59:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:59:05] [Rank 0] Total Loss: 4.7012
+[2025-09-05 23:59:05] [Rank 0] Total FTA (Unweighted): 0.3269
+[2025-09-05 23:59:05] [Rank 0] Total FTA (Weighted): 0.3269
+[2025-09-05 23:59:05] [Rank 0] Group 0 Loss: 3.3817
+[2025-09-05 23:59:05] [Rank 0] Group 1 Loss: 3.2843
+[2025-09-05 23:59:05] [Rank 0] Group 2 Loss: 3.3352
+[2025-09-05 23:59:05] [Rank 0] Group 3 Loss: 3.7222
+[2025-09-05 23:59:05] [Rank 0] Group 4 Loss: 4.0821
+[2025-09-05 23:59:05] [Rank 0] Group 5 Loss: 4.4964
+[2025-09-05 23:59:05] [Rank 0] Group 6 Loss: 4.8113
+[2025-09-05 23:59:05] [Rank 0] Group 7 Loss: 4.9529
+[2025-09-05 23:59:05] [Rank 0] Group 8 Loss: 5.2532
+[2025-09-05 23:59:05] [Rank 0] Group 9 Loss: 5.3560
+[2025-09-05 23:59:05] [Rank 0] Group 10 Loss: 5.4326
+[2025-09-05 23:59:05] [Rank 0] Group 11 Loss: 5.4613
+[2025-09-05 23:59:05] [Rank 0] Group 12 Loss: 5.3980
+[2025-09-05 23:59:05] [Rank 0] Group 13 Loss: 5.4089
+[2025-09-05 23:59:05] [Rank 0] Group 14 Loss: 5.4402
+[2025-09-05 23:59:05] [Rank 0] Group 15 Loss: 5.4023
+[2025-09-05 23:59:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:59:05] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:59:05] [Rank 0] Group 2 FTA: 0.5600
+[2025-09-05 23:59:05] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:59:05] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:59:05] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 23:59:05] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:59:05] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 23:59:05] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 23:59:05] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 23:59:05] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 23:59:05] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 23:59:05] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 23:59:05] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 23:59:05] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 23:59:05] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-05 23:59:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-05 23:59:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-05 23:59:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-05 23:59:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-05 23:59:07] [Rank 0] step:9001/10000 train_time:370347ms step_avg:41.15ms
+[2025-09-05 23:59:08] [Rank 0] step:9021/10000 train_time:371014ms step_avg:41.13ms
+[2025-09-05 23:59:08] [Rank 0] step:9041/10000 train_time:371750ms step_avg:41.12ms
+[2025-09-05 23:59:09] [Rank 0] step:9061/10000 train_time:372486ms step_avg:41.11ms
+[2025-09-05 23:59:10] [Rank 0] step:9081/10000 train_time:373222ms step_avg:41.10ms
+[2025-09-05 23:59:10] [Rank 0] step:9101/10000 train_time:373958ms step_avg:41.09ms
+[2025-09-05 23:59:11] [Rank 0] step:9121/10000 train_time:374694ms step_avg:41.08ms
+[2025-09-05 23:59:12] [Rank 0] step:9141/10000 train_time:375429ms step_avg:41.07ms
+[2025-09-05 23:59:13] [Rank 0] step:9161/10000 train_time:376166ms step_avg:41.06ms
+[2025-09-05 23:59:13] [Rank 0] step:9181/10000 train_time:376901ms step_avg:41.05ms
+[2025-09-05 23:59:14] [Rank 0] step:9201/10000 train_time:377637ms step_avg:41.04ms
+[2025-09-05 23:59:15] [Rank 0] step:9221/10000 train_time:378372ms step_avg:41.03ms
+[2025-09-05 23:59:16] [Rank 0] step:9241/10000 train_time:379108ms step_avg:41.02ms
+[2025-09-05 23:59:16] [Rank 0] step:9261/10000 train_time:379844ms step_avg:41.02ms
+[2025-09-05 23:59:17] [Rank 0] step:9281/10000 train_time:380580ms step_avg:41.01ms
+[2025-09-05 23:59:18] [Rank 0] step:9301/10000 train_time:381316ms step_avg:41.00ms
+[2025-09-05 23:59:19] [Rank 0] step:9321/10000 train_time:382052ms step_avg:40.99ms
+[2025-09-05 23:59:19] [Rank 0] step:9341/10000 train_time:382788ms step_avg:40.98ms
+[2025-09-05 23:59:20] [Rank 0] step:9361/10000 train_time:383524ms step_avg:40.97ms
+[2025-09-05 23:59:21] [Rank 0] step:9381/10000 train_time:384261ms step_avg:40.96ms
+[2025-09-05 23:59:22] [Rank 0] step:9401/10000 train_time:384997ms step_avg:40.95ms
+[2025-09-05 23:59:22] [Rank 0] step:9421/10000 train_time:385732ms step_avg:40.94ms
+[2025-09-05 23:59:23] [Rank 0] step:9441/10000 train_time:386468ms step_avg:40.94ms
+[2025-09-05 23:59:24] [Rank 0] step:9461/10000 train_time:387204ms step_avg:40.93ms
+[2025-09-05 23:59:24] [Rank 0] step:9481/10000 train_time:387940ms step_avg:40.92ms
+[2025-09-05 23:59:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
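The "[✓] ... curve updated and saved" lines indicate that all four PNGs are re-rendered after every detailed evaluation, so the files on disk always reflect the full history up to the latest step. A minimal sketch of that pattern, with an assumed in-memory history and a hypothetical helper name (the actual plotting code is not shown in this excerpt):

import matplotlib.pyplot as plt

history = {}  # group id -> list of (step, loss), appended once per evaluation

def update_loss_curves(step, group_losses, out_path):
    # Record this evaluation's values, then redraw and overwrite the PNG,
    # which is what the repeated "curve updated and saved" lines suggest.
    for g, loss in group_losses.items():
        history.setdefault(g, []).append((step, loss))
    fig, ax = plt.subplots()
    for g, points in sorted(history.items()):
        steps, losses = zip(*points)
        ax.plot(steps, losses, label=f"Group {g}")
    ax.set_xlabel("step")
    ax.set_ylabel("loss")
    ax.legend(fontsize=6)
    fig.savefig(out_path)
    plt.close(fig)

# e.g. after the step-9000 evaluation above:
update_loss_curves(9000, {0: 3.3817, 1: 3.2843, 2: 3.3352}, "per_class_loss_curves.png")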
+[2025-09-05 23:59:26] [Rank 0] PRINT: step:9500/10000 train_loss:2.1711 val_loss:2.1586 train_time:388757ms step_avg:40.92ms
+[2025-09-05 23:59:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:59:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:00:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:00:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:00:47] [Rank 0] Total Loss: 4.6830
+[2025-09-06 00:00:47] [Rank 0] Total FTA (Unweighted): 0.3175
+[2025-09-06 00:00:47] [Rank 0] Total FTA (Weighted): 0.3175
+[2025-09-06 00:00:47] [Rank 0] Group 0 Loss: 3.4090
+[2025-09-06 00:00:47] [Rank 0] Group 1 Loss: 3.2786
+[2025-09-06 00:00:47] [Rank 0] Group 2 Loss: 3.3178
+[2025-09-06 00:00:47] [Rank 0] Group 3 Loss: 3.7144
+[2025-09-06 00:00:47] [Rank 0] Group 4 Loss: 4.0584
+[2025-09-06 00:00:47] [Rank 0] Group 5 Loss: 4.4711
+[2025-09-06 00:00:47] [Rank 0] Group 6 Loss: 4.7941
+[2025-09-06 00:00:47] [Rank 0] Group 7 Loss: 4.9313
+[2025-09-06 00:00:47] [Rank 0] Group 8 Loss: 5.2187
+[2025-09-06 00:00:47] [Rank 0] Group 9 Loss: 5.3352
+[2025-09-06 00:00:47] [Rank 0] Group 10 Loss: 5.4071
+[2025-09-06 00:00:47] [Rank 0] Group 11 Loss: 5.4398
+[2025-09-06 00:00:47] [Rank 0] Group 12 Loss: 5.3735
+[2025-09-06 00:00:47] [Rank 0] Group 13 Loss: 5.3935
+[2025-09-06 00:00:47] [Rank 0] Group 14 Loss: 5.4064
+[2025-09-06 00:00:47] [Rank 0] Group 15 Loss: 5.3798
+[2025-09-06 00:00:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:00:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:00:47] [Rank 0] Group 2 FTA: 0.4600
+[2025-09-06 00:00:47] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:00:47] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:00:47] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-06 00:00:47] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:00:47] [Rank 0] Group 7 FTA: 0.1400
+[2025-09-06 00:00:47] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:00:47] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-06 00:00:47] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-06 00:00:47] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 00:00:47] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-06 00:00:47] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-06 00:00:47] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-06 00:00:47] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-06 00:00:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-06 00:00:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-06 00:00:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-06 00:00:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-06 00:00:49] [Rank 0] step:9501/10000 train_time:388766ms step_avg:40.92ms
+[2025-09-06 00:00:50] [Rank 0] step:9521/10000 train_time:389433ms step_avg:40.90ms
+[2025-09-06 00:00:50] [Rank 0] step:9541/10000 train_time:390169ms step_avg:40.89ms
+[2025-09-06 00:00:51] [Rank 0] step:9561/10000 train_time:390905ms step_avg:40.89ms
+[2025-09-06 00:00:52] [Rank 0] step:9581/10000 train_time:391641ms step_avg:40.88ms
+[2025-09-06 00:00:53] [Rank 0] step:9601/10000 train_time:392377ms step_avg:40.87ms
+[2025-09-06 00:00:53] [Rank 0] step:9621/10000 train_time:393114ms step_avg:40.86ms
+[2025-09-06 00:00:54] [Rank 0] step:9641/10000 train_time:393849ms step_avg:40.85ms
+[2025-09-06 00:00:55] [Rank 0] step:9661/10000 train_time:394862ms step_avg:40.87ms
+[2025-09-06 00:00:56] [Rank 0] step:9681/10000 train_time:395598ms step_avg:40.86ms
+[2025-09-06 00:00:57] [Rank 0] step:9701/10000 train_time:396333ms step_avg:40.85ms
+[2025-09-06 00:00:57] [Rank 0] step:9721/10000 train_time:397069ms step_avg:40.85ms
+[2025-09-06 00:00:58] [Rank 0] step:9741/10000 train_time:397947ms step_avg:40.85ms
+[2025-09-06 00:00:59] [Rank 0] step:9761/10000 train_time:398682ms step_avg:40.84ms
+[2025-09-06 00:01:00] [Rank 0] step:9781/10000 train_time:399418ms step_avg:40.84ms
+[2025-09-06 00:01:01] [Rank 0] step:9801/10000 train_time:400304ms step_avg:40.84ms
+[2025-09-06 00:01:01] [Rank 0] step:9821/10000 train_time:401070ms step_avg:40.84ms
+[2025-09-06 00:01:02] [Rank 0] step:9841/10000 train_time:401807ms step_avg:40.83ms
+[2025-09-06 00:01:03] [Rank 0] step:9861/10000 train_time:402544ms step_avg:40.82ms
+[2025-09-06 00:01:03] [Rank 0] step:9881/10000 train_time:403280ms step_avg:40.81ms
+[2025-09-06 00:01:04] [Rank 0] step:9901/10000 train_time:404016ms step_avg:40.81ms
+[2025-09-06 00:01:05] [Rank 0] step:9921/10000 train_time:404752ms step_avg:40.80ms
+[2025-09-06 00:01:06] [Rank 0] step:9941/10000 train_time:405487ms step_avg:40.79ms
+[2025-09-06 00:01:06] [Rank 0] step:9961/10000 train_time:406224ms step_avg:40.78ms
+[2025-09-06 00:01:07] [Rank 0] step:9981/10000 train_time:406960ms step_avg:40.77ms
+[2025-09-06 00:01:08] [Rank 0] step:10000/10000 train_time:407660ms step_avg:40.77ms
+[2025-09-06 00:01:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:01:08] [Rank 0] PRINT: step:10000/10000 train_loss:2.1622 val_loss:2.1506 train_time:407781ms step_avg:40.78ms
+[2025-09-06 00:01:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:01:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:02:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:02:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:02:30] [Rank 0] Total Loss: 4.6765
+[2025-09-06 00:02:30] [Rank 0] Total FTA (Unweighted): 0.3381
+[2025-09-06 00:02:30] [Rank 0] Total FTA (Weighted): 0.3381
+[2025-09-06 00:02:30] [Rank 0] Group 0 Loss: 3.3776
+[2025-09-06 00:02:30] [Rank 0] Group 1 Loss: 3.2731
+[2025-09-06 00:02:30] [Rank 0] Group 2 Loss: 3.3452
+[2025-09-06 00:02:30] [Rank 0] Group 3 Loss: 3.7132
+[2025-09-06 00:02:30] [Rank 0] Group 4 Loss: 4.0609
+[2025-09-06 00:02:30] [Rank 0] Group 5 Loss: 4.4675
+[2025-09-06 00:02:30] [Rank 0] Group 6 Loss: 4.7777
+[2025-09-06 00:02:30] [Rank 0] Group 7 Loss: 4.9255
+[2025-09-06 00:02:30] [Rank 0] Group 8 Loss: 5.2100
+[2025-09-06 00:02:30] [Rank 0] Group 9 Loss: 5.3185
+[2025-09-06 00:02:30] [Rank 0] Group 10 Loss: 5.3995
+[2025-09-06 00:02:30] [Rank 0] Group 11 Loss: 5.4274
+[2025-09-06 00:02:30] [Rank 0] Group 12 Loss: 5.3612
+[2025-09-06 00:02:30] [Rank 0] Group 13 Loss: 5.3853
+[2025-09-06 00:02:30] [Rank 0] Group 14 Loss: 5.4074
+[2025-09-06 00:02:30] [Rank 0] Group 15 Loss: 5.3739
+[2025-09-06 00:02:30] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:02:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:02:30] [Rank 0] Group 2 FTA: 0.7000
+[2025-09-06 00:02:30] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:02:30] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:02:30] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-06 00:02:30] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:02:30] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-06 00:02:30] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:02:30] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-06 00:02:30] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-06 00:02:30] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-06 00:02:30] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-06 00:02:30] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-06 00:02:30] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-06 00:02:30] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-06 00:02:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_loss_curves.png
+[2025-09-06 00:02:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/per_class_acc_curves.png
+[2025-09-06 00:02:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_loss_curve.png
+[2025-09-06 00:02:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_42/total_acc_curve.png
+[2025-09-06 00:02:32] [Rank 0] step:10001/10000 train_time:407791ms step_avg:40.78ms
+[2025-09-06 00:02:32] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 00:02:32 2025 ---
+[2025-09-06 00:02:32] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ac2cb16ef3619cfd5c0e7a9e07d94d999a9a9eb7 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false,
+ "seed": 43, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.08, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "27a14318-7e29-4d54-a834-6f6df840acdb", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 
894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 
425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 
617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 
898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..93c235dd4c7bb6d9ab780cf99ad68baa35148721 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34d15ed57dbbdc3f63e1ce8cf06fcbab1973e7fdee0a1cbf0e249e4126c1dba2 +size 298941 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..1e8d96491048031172634536e1bf554a86b5e94e --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afea2205e24f72d6b6d1fb3b5a9e8351c452a83b03512d1680241f6258549576 +size 407249 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..f8a1019dc2a34b69267ece2664b3c3c9d70bae53 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30ac30d64ec36b2492d70bb7c0e90958dd8fe9d2bb4cc59e132ceabd9a32e9bb +size 88459 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..624a4d57668d4f31e469c6ab3cc9f5d8cdb89e87 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3559b6c9edd20a9b257126ff732e4753da07986c6b5ca83dd4e85114e564d1f0 +size 115916 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/training_log_27a14318-7e29-4d54-a834-6f6df840acdb.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/training_log_27a14318-7e29-4d54-a834-6f6df840acdb.txt new file mode 100644 index 0000000000000000000000000000000000000000..4bf92bb6dab25519e5dcd7604cc9f1e4377ca6d6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/training_log_27a14318-7e29-4d54-a834-6f6df840acdb.txt @@ -0,0 +1,5614 @@ +[2025-09-06 00:02:53] [Rank 0] PRINT: --- Script Start: Sat Sep 6 00:02:53 2025 --- +[2025-09-06 00:02:53] [Rank 0] PRINT: --- Script Start: Sat Sep 6 00:02:53 2025 --- +[2025-09-06 00:02:53] 
[Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.08, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-06 00:02:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.08, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-06 00:02:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-06 00:02:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-06 00:02:53] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-06 00:02:53] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-06 00:02:53] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43 +[2025-09-06 00:02:53] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43 +[2025-09-06 00:02:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", 
buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # cycle(files) loops over the shards, so multi-epoch training is supported
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: SGD+momentum on ALL parameters (no Muon, no Adam); "
+                         "10: Muon(O Attn, MLP)/Adam(QK Attn, V Attn); "
+                         "13: Muon(W_O Attn, W_2 MLP)/Adam(QK Attn, V Attn, W_1 MLP); "
+                         "14: Muon(W_O Attn)/Adam(QK Attn, V Attn, MLP); "
+                         "15: Muon(W_V Attn)/Adam(QK Attn, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the logfile exactly once (an unguarded second
+        # write here would duplicate every log line)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
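+# -----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original run): how a .bin shard with the
+# layout that _load_data_shard() above expects could be produced. The header is
+# 256 int32 values (magic 20240520, version 1, then the token count) and the
+# payload is the tokens as uint16. The function name is hypothetical.
+def write_data_shard(path, token_ids):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520        # magic number checked by the loader
+    header[1] = 1               # version checked by the loader
+    header[2] = len(token_ids)  # number of uint16 tokens that follow
+    with open(path, "wb") as f:
+        f.write(header.tobytes())  # 256 * 4 bytes, matching the loader's f.seek(256 * 4)
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())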
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
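+    # Note (added for clarity): fixed_indices, when provided, is expected to have
+    # the same shape as the fixed_eval_indices.json saved in the run directory,
+    # i.e. a dict mapping group-id strings to line indices into the QA jsonl, e.g.
+    #     {"0": [1390189, 1220977, ...], "1": [1238956, 182074, ...], ...}
+    # so the `needed` set above is simply the union of all per-group index lists.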
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
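+    # Worked example of the padding arithmetic in the loop above (illustrative):
+    # a sample that tokenizes to 75 tokens (incl. EOS) is padded to
+    # padded_len = ((75 + 127) // 128) * 128 = 128, giving window_blocks = 1;
+    # a 300-token sample pads to 384 with window_blocks = 3. Targets are the
+    # inputs shifted left by one, and the padding tail of the target is -100,
+    # which cross_entropy skips via ignore_index=-100.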
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
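+    Stratified sampling note (illustrative numbers, added for clarity): with
+    num_samples=5000 and a 1,000,000-line QA file, sample_ratio = 0.005, so a
+    class with 2,000 items contributes max(1, int(2000 * 0.005)) = 10 samples,
+    and every class keeps at least one sample.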
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; lr comes straight from the CLI (this branch defines no local muon_lr alias)
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach() / args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_loss:{train_loss_per_token:.4f} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-06 00:02:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # cycle through the shards, so multi-epoch training simply wraps around + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets
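For reference, the `.bin` shard layout that `_load_data_shard` above validates is: a 256-slot int32 header (magic 20240520, version 1, token count), followed by the raw uint16 token stream. A minimal writer sketch under exactly those assumptions (hypothetical helper, not part of the training script):

import numpy as np

def write_data_shard(path: str, tokens: np.ndarray) -> None:
    # header: 256 int32 slots, of which only the first three are used
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520       # magic number checked by _load_data_shard
    header[1] = 1              # version
    header[2] = len(tokens)    # number of uint16 tokens that follow
    with open(path, "wb") as f:
        f.write(header.tobytes())
        f.write(tokens.astype(np.uint16).tobytes())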
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+ +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n")
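As a worked example of the power-law construction used throughout (see generate_powerlaw_selection_counts, defined just below): group 0 holds one head class with 2**m samples, and each tail group g >= 1 holds 2**(g-1) classes with 2**(m-g) samples apiece, so every tail group carries the same total budget. For m = 3:

counts, groups = generate_powerlaw_selection_counts(3)
# counts -> {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}   (class_id -> samples per class)
# groups -> [0, 1, 2, 2, 3, 3, 3, 3]                           (class_id -> group_id)
# group totals: g0 = 8 (= 2**m); g1 = g2 = g3 = 4 (= 2**(m-1))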
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
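The FTA ("first-token accuracy") check in the loop above reduces to a single comparison: take the logits at the last prompt position and test whether their argmax equals the first token of the reference answer. A stripped-down sketch, assuming a GPT-2-style `tokenizer` and a [seq_len, vocab] `logits` tensor as produced in the surrounding loop:

def first_token_hit(tokenizer, logits, prompt: str, answer: str) -> bool:
    prompt_len = len(tokenizer.encode(prompt, add_special_tokens=False))
    expected = tokenizer.encode(" " + answer, add_special_tokens=False)[0]
    predicted = logits[prompt_len - 1].argmax().item()  # next-token prediction after the prompt
    return predicted == expected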
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
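For orientation, the history structures these plotting helpers consume are string-keyed nested dicts: outer key is the class group, inner key is the training step (illustrative numbers only):

example_history = {
    "0": {"500": 2.31, "1000": 1.87},   # group 0: step -> loss
    "1": {"500": 2.95, "1000": 2.40},
}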
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+ elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # optionally add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
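The flatten-and-dedup idiom above matters because the target lists are assembled from parameter lists that can overlap, and registering the same tensor with an optimizer twice would double-update it (and can trip PyTorch's parameter-group checks). Extracted as a standalone helper, equivalent to the inline loop (editorial sketch):

def flatten_unique(nested):
    # flatten [param | list[param]] preserving order, dropping duplicates by identity
    seen, flat = set(), []
    for entry in nested:
        for p in (entry if isinstance(entry, list) else [entry]):
            if p is not None and id(p) not in seen:
                seen.add(id(p))
                flat.append(p)
    return flat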
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+ elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # optionally add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; use CLI muon_lr (not defined locally in this branch) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
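The get_lr schedule defined above holds the multiplier at 1.0 for the first 1 - cooldown_frac of training, then decays it linearly to 0.1; each parameter group's actual learning rate is its stashed "initial_lr" times this multiplier. A self-contained restatement with this run's defaults (num_iterations=10000, cooldown_frac=0.8):

def lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)  # progress, clamped like get_lr
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / cooldown_frac
    return w * 1.0 + (1 - w) * 0.1

# lr_multiplier(0) == lr_multiplier(2000) == 1.0; lr_multiplier(6000) == 0.55; lr_multiplier(10000) == 0.1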
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
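The resulting fixed_eval_indices.json maps each group id (as a string) to at most per_group_k line numbers into the QA jsonl, so every later evaluation re-reads exactly the same items. A quick way to sanity-check a saved index file (values illustrative):

import json

with open("fixed_eval_indices.json") as f:  # lives inside the run directory
    fixed = json.load(f)
print({gid: len(ixs) for gid, ixs in sorted(fixed.items(), key=lambda kv: int(kv[0]))})
# e.g. {'0': 100, '1': 100, ..., '15': 100} when every group has >= per_group_k candidates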
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / step if step > 0 else 0
+
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                # num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use the unweighted (simple) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
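+        # Shape of `history` after a few evaluations (illustrative values,
+        # rounded from the log below):
+        #     history['per_class_loss'] == {group_id: {"500": 3.65, "1000": 3.34, ...}, ...}
+        #     history['total_acc']      == {"500": 0.08, "1000": 0.11, ...}
+        # plot_curves is assumed to draw one line per top-level key (a single
+        # line for the flat total_* dicts), with the training step on the x-axis.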
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Gradient clipping for SGD mode (mode 9) to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    # Scale every param group's LR by the schedule multiplier for this step.
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    # Momentum warmup for the second optimizer: ramp linearly from 0.85 to
+    # 0.95 over the first 300 steps.
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Computed for reference; not currently included in the printed line.
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
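+# A self-contained sketch of the momentum warmup used in the training loop
+# above; the values follow directly from the code:
+#
+#     def warmup_momentum(step: int) -> float:
+#         frac = min(step / 300, 1)
+#         return (1 - frac) * 0.85 + frac * 0.95
+#
+#     warmup_momentum(0)    # 0.85
+#     warmup_momentum(150)  # 0.90
+#     warmup_momentum(300)  # 0.95 (constant from here on)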
+[2025-09-06 00:02:53] [Rank 0] PRINT: Constructing model...
+[2025-09-06 00:02:54] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-06 00:02:54] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-06 00:02:54] [Rank 0] PRINT: Testing model forward function:
+[2025-09-06 00:02:58] [Rank 0] PRINT: Model test - Result type:
+[2025-09-06 00:02:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-06 00:02:58] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-06 00:02:58] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-06 00:02:58] [Rank 0] PRINT: Model returns:
+[2025-09-06 00:02:58] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-06 00:02:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-06 00:02:58] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.08).
+[2025-09-06 00:02:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-06 00:02:58] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-06 00:03:03] [Rank 0] PRINT: Model compilation complete.
+[2025-09-06 00:03:03] [Rank 0] PRINT: Starting warmup...
+[2025-09-06 00:03:41] [Rank 0] PRINT: Warmup complete.
+[2025-09-06 00:03:41] [Rank 0] PRINT: Starting training...
+[2025-09-06 00:03:47] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/fixed_eval_indices.json
+[2025-09-06 00:03:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:03:51] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-06 00:04:23] [Rank 0] step:21/10000 train_time:32516ms step_avg:1548.37ms
+[2025-09-06 00:04:24] [Rank 0] step:41/10000 train_time:33245ms step_avg:810.85ms
+[2025-09-06 00:04:25] [Rank 0] step:61/10000 train_time:33971ms step_avg:556.90ms
+[2025-09-06 00:04:25] [Rank 0] step:81/10000 train_time:34698ms step_avg:428.37ms
+[2025-09-06 00:04:26] [Rank 0] step:101/10000 train_time:35425ms step_avg:350.74ms
+[2025-09-06 00:04:27] [Rank 0] step:121/10000 train_time:36152ms step_avg:298.78ms
+[2025-09-06 00:04:28] [Rank 0] step:141/10000 train_time:36879ms step_avg:261.56ms
+[2025-09-06 00:04:28] [Rank 0] step:161/10000 train_time:37607ms step_avg:233.58ms
+[2025-09-06 00:04:29] [Rank 0] step:181/10000 train_time:38334ms step_avg:211.79ms
+[2025-09-06 00:04:30] [Rank 0] step:201/10000 train_time:39061ms step_avg:194.34ms
+[2025-09-06 00:04:31] [Rank 0] step:221/10000 train_time:39789ms step_avg:180.04ms
+[2025-09-06 00:04:31] [Rank 0] step:241/10000 train_time:40516ms step_avg:168.12ms
+[2025-09-06 00:04:32] [Rank 0] step:261/10000 train_time:41243ms step_avg:158.02ms
+[2025-09-06 00:04:33] [Rank 0] step:281/10000 train_time:41971ms step_avg:149.36ms
+[2025-09-06 00:04:33] [Rank 0] step:301/10000 train_time:42698ms step_avg:141.85ms
+[2025-09-06 00:04:34] [Rank 0] step:321/10000 train_time:43425ms step_avg:135.28ms
+[2025-09-06 00:04:35] [Rank 0] step:341/10000 train_time:44151ms step_avg:129.48ms
+[2025-09-06 00:04:36] [Rank 0] step:361/10000 train_time:44878ms step_avg:124.32ms
+[2025-09-06 00:04:36] [Rank 0] step:381/10000 train_time:45605ms step_avg:119.70ms
+[2025-09-06 00:04:37] [Rank 0] step:401/10000 train_time:46332ms step_avg:115.54ms
+[2025-09-06 00:04:38] [Rank 0] step:421/10000 train_time:47060ms step_avg:111.78ms
+[2025-09-06 00:04:39] [Rank 0] step:441/10000 train_time:47787ms step_avg:108.36ms
+[2025-09-06 00:04:39] [Rank 0] step:461/10000 train_time:48514ms step_avg:105.24ms
+[2025-09-06 00:04:40] [Rank 0] step:481/10000 train_time:49241ms step_avg:102.37ms
+[2025-09-06 00:04:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:04:41] [Rank 0] PRINT: step:500/10000 train_loss:5.9507 val_loss:4.3212 train_time:50048ms step_avg:100.10ms
+[2025-09-06 00:04:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:04:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:06:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:06:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:06:03] [Rank 0] Total Loss: 6.0132
+[2025-09-06 00:06:03] [Rank 0] Total FTA (Unweighted): 0.0813
+[2025-09-06 00:06:03] [Rank 0] Total FTA (Weighted): 0.0813
+[2025-09-06 00:06:03] [Rank 0] Group 0 Loss: 3.6582
+[2025-09-06 00:06:03] [Rank 0] Group 1 Loss: 3.9000
+[2025-09-06 00:06:03] [Rank 0] Group 2 Loss: 4.8084
+[2025-09-06 00:06:03] [Rank 0] Group 3 Loss: 5.5348
+[2025-09-06 00:06:03] [Rank 0] Group 4 Loss: 6.2316
+[2025-09-06 00:06:03] [Rank 0] Group 5 Loss: 6.3676
+[2025-09-06 00:06:03] [Rank 0] Group 6 Loss: 6.4474
+[2025-09-06 00:06:03] [Rank 0] Group 7 Loss: 6.4093
+[2025-09-06 00:06:03] [Rank 0] Group 8 Loss: 6.5546
+[2025-09-06 00:06:03] [Rank 0] Group 9 Loss: 6.6691
+[2025-09-06 00:06:03] [Rank 0] Group 10 Loss: 6.6474
+[2025-09-06 00:06:03] [Rank 0] Group 11 Loss: 6.7045
+[2025-09-06 00:06:03] [Rank 0] Group 12 Loss: 6.5288
+[2025-09-06 00:06:03] [Rank 0] Group 13 Loss: 6.5416
+[2025-09-06 00:06:03] [Rank 0] Group 14 Loss: 6.6645
+[2025-09-06 00:06:03] [Rank 0] Group 15 Loss: 6.5429
+[2025-09-06 00:06:03] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 00:06:03] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:06:03] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-06 00:06:03] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-06 00:06:03] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-06 00:06:03] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-06 00:06:03] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 00:06:03] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-06 00:06:03] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-06 00:06:03] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-06 00:06:03] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-06 00:06:03] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-06 00:06:03] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:06:04] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-06 00:06:04] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 00:06:04] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 00:06:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:06:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:06:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:06:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:06:05] [Rank 0] step:501/10000 train_time:50057ms step_avg:99.91ms
+[2025-09-06 00:06:06] [Rank 0] step:521/10000 train_time:50722ms step_avg:97.36ms
+[2025-09-06 00:06:06] [Rank 0] step:541/10000 train_time:51459ms step_avg:95.12ms
+[2025-09-06 00:06:07] [Rank 0] step:561/10000 train_time:52186ms step_avg:93.02ms
+[2025-09-06 00:06:08] [Rank 0] step:581/10000 train_time:52911ms step_avg:91.07ms
+[2025-09-06 00:06:08] [Rank 0] step:601/10000 train_time:53637ms step_avg:89.25ms
+[2025-09-06 00:06:09] [Rank 0] step:621/10000 train_time:54363ms step_avg:87.54ms
+[2025-09-06 00:06:10] [Rank 0] step:641/10000 train_time:55091ms step_avg:85.95ms
+[2025-09-06 00:06:11] [Rank 0] step:661/10000 train_time:55818ms step_avg:84.45ms
+[2025-09-06 00:06:11] [Rank 0] step:681/10000 train_time:56546ms step_avg:83.03ms
+[2025-09-06 00:06:12] [Rank 0] step:701/10000 train_time:57272ms step_avg:81.70ms
+[2025-09-06 00:06:13] [Rank 0] step:721/10000 train_time:57998ms step_avg:80.44ms
+[2025-09-06 00:06:14] [Rank 0] step:741/10000 train_time:58726ms step_avg:79.25ms
+[2025-09-06 00:06:14] [Rank 0] step:761/10000 train_time:59633ms step_avg:78.36ms
+[2025-09-06 00:06:15] [Rank 0] step:781/10000 train_time:60366ms step_avg:77.29ms
+[2025-09-06 00:06:16] [Rank 0] step:801/10000 train_time:61098ms step_avg:76.28ms
+[2025-09-06 00:06:17] [Rank 0] step:821/10000 train_time:62167ms step_avg:75.72ms
+[2025-09-06 00:06:18] [Rank 0] step:841/10000 train_time:62899ms step_avg:74.79ms
+[2025-09-06 00:06:18] [Rank 0] step:861/10000 train_time:63631ms step_avg:73.90ms
+[2025-09-06 00:06:19] [Rank 0] step:881/10000 train_time:64364ms step_avg:73.06ms
+[2025-09-06 00:06:20] [Rank 0] step:901/10000 train_time:65096ms step_avg:72.25ms
+[2025-09-06 00:06:21] [Rank 0] step:921/10000 train_time:65829ms step_avg:71.48ms
+[2025-09-06 00:06:21] [Rank 0] step:941/10000 train_time:66561ms step_avg:70.73ms
+[2025-09-06 00:06:22] [Rank 0] step:961/10000 train_time:67292ms step_avg:70.02ms
+[2025-09-06 00:06:23] [Rank 0] step:981/10000 train_time:68024ms step_avg:69.34ms
+[2025-09-06 00:06:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:06:24] [Rank 0] PRINT: step:1000/10000 train_loss:3.8762 val_loss:3.5422 train_time:68836ms step_avg:68.84ms
+[2025-09-06 00:06:24] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:06:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:07:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:07:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:07:46] [Rank 0] Total Loss: 5.5107
+[2025-09-06 00:07:46] [Rank 0] Total FTA (Unweighted): 0.1075
+[2025-09-06 00:07:46] [Rank 0] Total FTA (Weighted): 0.1075
+[2025-09-06 00:07:46] [Rank 0] Group 0 Loss: 3.3422
+[2025-09-06 00:07:46] [Rank 0] Group 1 Loss: 3.3397
+[2025-09-06 00:07:46] [Rank 0] Group 2 Loss: 3.7709
+[2025-09-06 00:07:46] [Rank 0] Group 3 Loss: 4.5764
+[2025-09-06 00:07:46] [Rank 0] Group 4 Loss: 5.4907
+[2025-09-06 00:07:46] [Rank 0] Group 5 Loss: 5.7855
+[2025-09-06 00:07:46] [Rank 0] Group 6 Loss: 5.9728
+[2025-09-06 00:07:46] [Rank 0] Group 7 Loss: 5.9746
+[2025-09-06 00:07:46] [Rank 0] Group 8 Loss: 6.1585
+[2025-09-06 00:07:46] [Rank 0] Group 9 Loss: 6.3138
+[2025-09-06 00:07:46] [Rank 0] Group 10 Loss: 6.2664
+[2025-09-06 00:07:46] [Rank 0] Group 11 Loss: 6.3310
+[2025-09-06 00:07:46] [Rank 0] Group 12 Loss: 6.1829
+[2025-09-06 00:07:46] [Rank 0] Group 13 Loss: 6.1781
+[2025-09-06 00:07:46] [Rank 0] Group 14 Loss: 6.2876
+[2025-09-06 00:07:46] [Rank 0] Group 15 Loss: 6.2002
+[2025-09-06 00:07:46] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 00:07:46] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:07:46] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:07:46] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 00:07:46] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 00:07:46] [Rank 0] Group 5 FTA: 0.1300
+[2025-09-06 00:07:46] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-06 00:07:46] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-06 00:07:46] [Rank 0] Group 8 FTA: 0.1300
+[2025-09-06 00:07:46] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 00:07:46] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-06 00:07:46] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 00:07:46] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:07:46] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 00:07:46] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:07:46] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 00:07:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:07:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:07:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:07:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:07:48] [Rank 0] step:1001/10000 train_time:68846ms step_avg:68.78ms
+[2025-09-06 00:07:48] [Rank 0] step:1021/10000 train_time:69519ms step_avg:68.09ms
+[2025-09-06 00:07:49] [Rank 0] step:1041/10000 train_time:70251ms step_avg:67.48ms
+[2025-09-06 00:07:50] [Rank 0] step:1061/10000 train_time:70983ms step_avg:66.90ms
+[2025-09-06 00:07:51] [Rank 0] step:1081/10000 train_time:71716ms step_avg:66.34ms
+[2025-09-06 00:07:51] [Rank 0] step:1101/10000 train_time:72448ms step_avg:65.80ms
+[2025-09-06 00:07:52] [Rank 0] step:1121/10000 train_time:73181ms step_avg:65.28ms
+[2025-09-06 00:07:53] [Rank 0] step:1141/10000 train_time:73914ms step_avg:64.78ms
+[2025-09-06 00:07:54] [Rank 0] step:1161/10000 train_time:74646ms step_avg:64.29ms
+[2025-09-06 00:07:54] [Rank 0] step:1181/10000 train_time:75379ms step_avg:63.83ms
+[2025-09-06 00:07:55] [Rank 0] step:1201/10000 train_time:76111ms step_avg:63.37ms
+[2025-09-06 00:07:56] [Rank 0] step:1221/10000 train_time:76844ms step_avg:62.94ms
+[2025-09-06 00:07:57] [Rank 0] step:1241/10000 train_time:77578ms step_avg:62.51ms
+[2025-09-06 00:07:57] [Rank 0] step:1261/10000 train_time:78310ms step_avg:62.10ms
+[2025-09-06 00:07:58] [Rank 0] step:1281/10000 train_time:79042ms step_avg:61.70ms
+[2025-09-06 00:07:59] [Rank 0] step:1301/10000 train_time:79774ms step_avg:61.32ms
+[2025-09-06 00:07:59] [Rank 0] step:1321/10000 train_time:80507ms step_avg:60.94ms
+[2025-09-06 00:08:00] [Rank 0] step:1341/10000 train_time:81239ms step_avg:60.58ms
+[2025-09-06 00:08:01] [Rank 0] step:1361/10000 train_time:81972ms step_avg:60.23ms
+[2025-09-06 00:08:02] [Rank 0] step:1381/10000 train_time:82704ms step_avg:59.89ms
+[2025-09-06 00:08:02] [Rank 0] step:1401/10000 train_time:83436ms step_avg:59.55ms
+[2025-09-06 00:08:03] [Rank 0] step:1421/10000 train_time:84168ms step_avg:59.23ms
+[2025-09-06 00:08:04] [Rank 0] step:1441/10000 train_time:84900ms step_avg:58.92ms
+[2025-09-06 00:08:05] [Rank 0] step:1461/10000 train_time:85632ms step_avg:58.61ms
+[2025-09-06 00:08:05] [Rank 0] step:1481/10000 train_time:86364ms step_avg:58.31ms
+[2025-09-06 00:08:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:08:07] [Rank 0] PRINT: step:1500/10000 train_loss:3.3453 val_loss:3.1702 train_time:87176ms step_avg:58.12ms
+[2025-09-06 00:08:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:08:07] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:09:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:09:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:09:28] [Rank 0] Total Loss: 5.2517
+[2025-09-06 00:09:28] [Rank 0] Total FTA (Unweighted): 0.1306
+[2025-09-06 00:09:28] [Rank 0] Total FTA (Weighted): 0.1306
+[2025-09-06 00:09:28] [Rank 0] Group 0 Loss: 3.2939
+[2025-09-06 00:09:28] [Rank 0] Group 1 Loss: 3.2322
+[2025-09-06 00:09:28] [Rank 0] Group 2 Loss: 3.5144
+[2025-09-06 00:09:28] [Rank 0] Group 3 Loss: 4.1379
+[2025-09-06 00:09:28] [Rank 0] Group 4 Loss: 5.0037
+[2025-09-06 00:09:28] [Rank 0] Group 5 Loss: 5.4104
+[2025-09-06 00:09:28] [Rank 0] Group 6 Loss: 5.6552
+[2025-09-06 00:09:28] [Rank 0] Group 7 Loss: 5.6968
+[2025-09-06 00:09:28] [Rank 0] Group 8 Loss: 5.9049
+[2025-09-06 00:09:28] [Rank 0] Group 9 Loss: 6.0725
+[2025-09-06 00:09:28] [Rank 0] Group 10 Loss: 6.0505
+[2025-09-06 00:09:28] [Rank 0] Group 11 Loss: 6.1237
+[2025-09-06 00:09:28] [Rank 0] Group 12 Loss: 5.9467
+[2025-09-06 00:09:28] [Rank 0] Group 13 Loss: 5.9555
+[2025-09-06 00:09:28] [Rank 0] Group 14 Loss: 6.0517
+[2025-09-06 00:09:28] [Rank 0] Group 15 Loss: 5.9765
+[2025-09-06 00:09:28] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-06 00:09:28] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:09:28] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:09:28] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 00:09:28] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 00:09:28] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-06 00:09:28] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-06 00:09:28] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 00:09:28] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-06 00:09:28] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 00:09:28] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-06 00:09:28] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 00:09:28] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 00:09:28] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 00:09:28] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:09:28] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:09:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:09:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:09:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:09:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:09:29] [Rank 0] step:1501/10000 train_time:87186ms step_avg:58.09ms
+[2025-09-06 00:09:30] [Rank 0] step:1521/10000 train_time:87849ms step_avg:57.76ms
+[2025-09-06 00:09:31] [Rank 0] step:1541/10000 train_time:88581ms step_avg:57.48ms
+[2025-09-06 00:09:32] [Rank 0] step:1561/10000 train_time:89314ms step_avg:57.22ms
+[2025-09-06 00:09:32] [Rank 0] step:1581/10000 train_time:90046ms step_avg:56.96ms
+[2025-09-06 00:09:33] [Rank 0] step:1601/10000 train_time:90778ms step_avg:56.70ms
+[2025-09-06 00:09:34] [Rank 0] step:1621/10000 train_time:91511ms step_avg:56.45ms
+[2025-09-06 00:09:35] [Rank 0] step:1641/10000 train_time:92448ms step_avg:56.34ms
+[2025-09-06 00:09:36] [Rank 0] step:1661/10000 train_time:93180ms step_avg:56.10ms
+[2025-09-06 00:09:36] [Rank 0] step:1681/10000 train_time:93912ms step_avg:55.87ms
+[2025-09-06 00:09:37] [Rank 0] step:1701/10000 train_time:94645ms step_avg:55.64ms
+[2025-09-06 00:09:38] [Rank 0] step:1721/10000 train_time:95378ms step_avg:55.42ms
+[2025-09-06 00:09:39] [Rank 0] step:1741/10000 train_time:96110ms step_avg:55.20ms
+[2025-09-06 00:09:39] [Rank 0] step:1761/10000 train_time:96843ms step_avg:54.99ms
+[2025-09-06 00:09:40] [Rank 0] step:1781/10000 train_time:97576ms step_avg:54.79ms
+[2025-09-06 00:09:41] [Rank 0] step:1801/10000 train_time:98308ms step_avg:54.59ms
+[2025-09-06 00:09:41] [Rank 0] step:1821/10000 train_time:99041ms step_avg:54.39ms
+[2025-09-06 00:09:42] [Rank 0] step:1841/10000 train_time:99774ms step_avg:54.20ms
+[2025-09-06 00:09:43] [Rank 0] step:1861/10000 train_time:100507ms step_avg:54.01ms
+[2025-09-06 00:09:44] [Rank 0] step:1881/10000 train_time:101239ms step_avg:53.82ms
+[2025-09-06 00:09:44] [Rank 0] step:1901/10000 train_time:101969ms step_avg:53.64ms
+[2025-09-06 00:09:45] [Rank 0] step:1921/10000 train_time:102699ms step_avg:53.46ms
+[2025-09-06 00:09:46] [Rank 0] step:1941/10000 train_time:103430ms step_avg:53.29ms
+[2025-09-06 00:09:47] [Rank 0] step:1961/10000 train_time:104163ms step_avg:53.12ms
+[2025-09-06 00:09:47] [Rank 0] step:1981/10000 train_time:104894ms step_avg:52.95ms
+[2025-09-06 00:09:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:09:48] [Rank 0] PRINT: step:2000/10000 train_loss:3.0524 val_loss:2.9322 train_time:105706ms step_avg:52.85ms
+[2025-09-06 00:09:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:09:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:11:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:11:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:11:10] [Rank 0] Total Loss: 5.0588
+[2025-09-06 00:11:10] [Rank 0] Total FTA (Unweighted): 0.1656
+[2025-09-06 00:11:10] [Rank 0] Total FTA (Weighted): 0.1656
+[2025-09-06 00:11:10] [Rank 0] Group 0 Loss: 3.1558
+[2025-09-06 00:11:10] [Rank 0] Group 1 Loss: 3.1502
+[2025-09-06 00:11:10] [Rank 0] Group 2 Loss: 3.4528
+[2025-09-06 00:11:10] [Rank 0] Group 3 Loss: 3.8581
+[2025-09-06 00:11:10] [Rank 0] Group 4 Loss: 4.6620
+[2025-09-06 00:11:10] [Rank 0] Group 5 Loss: 5.1292
+[2025-09-06 00:11:10] [Rank 0] Group 6 Loss: 5.4156
+[2025-09-06 00:11:10] [Rank 0] Group 7 Loss: 5.4785
+[2025-09-06 00:11:10] [Rank 0] Group 8 Loss: 5.7359
+[2025-09-06 00:11:10] [Rank 0] Group 9 Loss: 5.8953
+[2025-09-06 00:11:10] [Rank 0] Group 10 Loss: 5.8794
+[2025-09-06 00:11:10] [Rank 0] Group 11 Loss: 5.9244
+[2025-09-06 00:11:10] [Rank 0] Group 12 Loss: 5.7613
+[2025-09-06 00:11:10] [Rank 0] Group 13 Loss: 5.7903
+[2025-09-06 00:11:10] [Rank 0] Group 14 Loss: 5.8589
+[2025-09-06 00:11:10] [Rank 0] Group 15 Loss: 5.7925
+[2025-09-06 00:11:10] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 00:11:10] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:11:10] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:11:10] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 00:11:10] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 00:11:10] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 00:11:10] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-06 00:11:10] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 00:11:10] [Rank 0] Group 8 FTA: 0.1600
+[2025-09-06 00:11:10] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 00:11:10] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-06 00:11:10] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 00:11:10] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:11:10] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 00:11:10] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 00:11:11] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 00:11:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:11:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:11:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:11:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:11:12] [Rank 0] step:2001/10000 train_time:105715ms step_avg:52.83ms
+[2025-09-06 00:11:13] [Rank 0] step:2021/10000 train_time:106582ms step_avg:52.74ms
+[2025-09-06 00:11:14] [Rank 0] step:2041/10000 train_time:107315ms step_avg:52.58ms
+[2025-09-06 00:11:14] [Rank 0] step:2061/10000 train_time:108047ms step_avg:52.42ms
+[2025-09-06 00:11:15] [Rank 0] step:2081/10000 train_time:108785ms step_avg:52.28ms
+[2025-09-06 00:11:16] [Rank 0] step:2101/10000 train_time:109517ms step_avg:52.13ms
+[2025-09-06 00:11:17] [Rank 0] step:2121/10000 train_time:110248ms step_avg:51.98ms
+[2025-09-06 00:11:17] [Rank 0] step:2141/10000 train_time:110979ms step_avg:51.84ms
+[2025-09-06 00:11:18] [Rank 0] step:2161/10000 train_time:111710ms step_avg:51.69ms
+[2025-09-06 00:11:19] [Rank 0] step:2181/10000 train_time:112442ms step_avg:51.56ms
+[2025-09-06 00:11:19] [Rank 0] step:2201/10000 train_time:113173ms step_avg:51.42ms
+[2025-09-06 00:11:20] [Rank 0] step:2221/10000 train_time:113904ms step_avg:51.29ms
+[2025-09-06 00:11:21] [Rank 0] step:2241/10000 train_time:114640ms step_avg:51.16ms
+[2025-09-06 00:11:22] [Rank 0] step:2261/10000 train_time:115378ms step_avg:51.03ms
+[2025-09-06 00:11:22] [Rank 0] step:2281/10000 train_time:116116ms step_avg:50.91ms
+[2025-09-06 00:11:23] [Rank 0] step:2301/10000 train_time:116855ms step_avg:50.78ms
+[2025-09-06 00:11:24] [Rank 0] step:2321/10000 train_time:117593ms step_avg:50.66ms
+[2025-09-06 00:11:25] [Rank 0] step:2341/10000 train_time:118332ms step_avg:50.55ms
+[2025-09-06 00:11:25] [Rank 0] step:2361/10000 train_time:119070ms step_avg:50.43ms
+[2025-09-06 00:11:26] [Rank 0] step:2381/10000 train_time:119809ms step_avg:50.32ms
+[2025-09-06 00:11:27] [Rank 0] step:2401/10000 train_time:120546ms step_avg:50.21ms
+[2025-09-06 00:11:28] [Rank 0] step:2421/10000 train_time:121285ms step_avg:50.10ms
+[2025-09-06 00:11:28] [Rank 0] step:2441/10000 train_time:122023ms step_avg:49.99ms
+[2025-09-06 00:11:29] [Rank 0] step:2461/10000 train_time:122762ms step_avg:49.88ms
+[2025-09-06 00:11:30] [Rank 0] step:2481/10000 train_time:123500ms step_avg:49.78ms
+[2025-09-06 00:11:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:11:31] [Rank 0] PRINT: step:2500/10000 train_loss:2.8534 val_loss:2.7583 train_time:124320ms step_avg:49.73ms
+[2025-09-06 00:11:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:11:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:12:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:12:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:12:53] [Rank 0] Total Loss: 4.9335
+[2025-09-06 00:12:53] [Rank 0] Total FTA (Unweighted): 0.1706
+[2025-09-06 00:12:53] [Rank 0] Total FTA (Weighted): 0.1706
+[2025-09-06 00:12:53] [Rank 0] Group 0 Loss: 3.1842
+[2025-09-06 00:12:53] [Rank 0] Group 1 Loss: 3.1444
+[2025-09-06 00:12:53] [Rank 0] Group 2 Loss: 3.3128
+[2025-09-06 00:12:53] [Rank 0] Group 3 Loss: 3.7932
+[2025-09-06 00:12:53] [Rank 0] Group 4 Loss: 4.4438
+[2025-09-06 00:12:53] [Rank 0] Group 5 Loss: 4.9139
+[2025-09-06 00:12:53] [Rank 0] Group 6 Loss: 5.2297
+[2025-09-06 00:12:53] [Rank 0] Group 7 Loss: 5.3239
+[2025-09-06 00:12:53] [Rank 0] Group 8 Loss: 5.5833
+[2025-09-06 00:12:53] [Rank 0] Group 9 Loss: 5.7176
+[2025-09-06 00:12:53] [Rank 0] Group 10 Loss: 5.7321
+[2025-09-06 00:12:53] [Rank 0] Group 11 Loss: 5.7966
+[2025-09-06 00:12:53] [Rank 0] Group 12 Loss: 5.6481
+[2025-09-06 00:12:53] [Rank 0] Group 13 Loss: 5.6885
+[2025-09-06 00:12:53] [Rank 0] Group 14 Loss: 5.7474
+[2025-09-06 00:12:53] [Rank 0] Group 15 Loss: 5.6769
+[2025-09-06 00:12:53] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 00:12:53] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:12:53] [Rank 0] Group 2 FTA: 0.1800
00:12:53] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 00:12:53] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 00:12:53] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 00:12:53] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 00:12:53] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 00:12:53] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 00:12:53] [Rank 0] Group 6 FTA: 0.1000 +[2025-09-06 00:12:53] [Rank 0] Group 6 FTA: 0.1000 +[2025-09-06 00:12:53] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 00:12:53] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 00:12:53] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 00:12:53] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-06 00:12:53] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 00:12:53] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 00:12:53] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 00:12:53] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 00:12:53] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 00:12:53] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 00:12:53] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 00:12:53] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-06 00:12:53] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 00:12:53] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 00:12:53] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 00:12:53] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 00:12:53] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-06 00:12:53] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-06 00:12:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png +[2025-09-06 00:12:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png +[2025-09-06 00:12:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png +[2025-09-06 00:12:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png +[2025-09-06 00:12:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png +[2025-09-06 00:12:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png +[2025-09-06 00:12:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png +[2025-09-06 00:12:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png +[2025-09-06 00:12:55] [Rank 0] step:2501/10000 train_time:124329ms step_avg:49.71ms +[2025-09-06 00:12:55] [Rank 0] step:2501/10000 train_time:124329ms step_avg:49.71ms +[2025-09-06 00:12:55] [Rank 0] step:2521/10000 train_time:124994ms step_avg:49.58ms +[2025-09-06 00:12:55] [Rank 0] step:2521/10000 train_time:124994ms step_avg:49.58ms +[2025-09-06 00:12:56] [Rank 0] step:2541/10000 train_time:125733ms step_avg:49.48ms +[2025-09-06 00:12:56] [Rank 0] step:2541/10000 train_time:125733ms step_avg:49.48ms +[2025-09-06 00:12:57] [Rank 0] step:2561/10000 train_time:126471ms step_avg:49.38ms +[2025-09-06 00:12:57] [Rank 0] step:2561/10000 train_time:126471ms step_avg:49.38ms +[2025-09-06 00:12:58] [Rank 0] step:2581/10000 train_time:127210ms step_avg:49.29ms +[2025-09-06 00:12:58] [Rank 0] step:2581/10000 
+[2025-09-06 00:12:58] [Rank 0] step:2601/10000 train_time:127949ms step_avg:49.19ms
+[2025-09-06 00:12:59] [Rank 0] step:2621/10000 train_time:128688ms step_avg:49.10ms
+[2025-09-06 00:13:00] [Rank 0] step:2641/10000 train_time:129427ms step_avg:49.01ms
+[2025-09-06 00:13:00] [Rank 0] step:2661/10000 train_time:130166ms step_avg:48.92ms
+[2025-09-06 00:13:01] [Rank 0] step:2681/10000 train_time:130905ms step_avg:48.83ms
+[2025-09-06 00:13:02] [Rank 0] step:2701/10000 train_time:131644ms step_avg:48.74ms
+[2025-09-06 00:13:03] [Rank 0] step:2721/10000 train_time:132381ms step_avg:48.65ms
+[2025-09-06 00:13:03] [Rank 0] step:2741/10000 train_time:133118ms step_avg:48.57ms
+[2025-09-06 00:13:04] [Rank 0] step:2761/10000 train_time:133854ms step_avg:48.48ms
+[2025-09-06 00:13:05] [Rank 0] step:2781/10000 train_time:134593ms step_avg:48.40ms
+[2025-09-06 00:13:06] [Rank 0] step:2801/10000 train_time:135332ms step_avg:48.32ms
+[2025-09-06 00:13:07] [Rank 0] step:2821/10000 train_time:136673ms step_avg:48.45ms
+[2025-09-06 00:13:08] [Rank 0] step:2841/10000 train_time:137412ms step_avg:48.37ms
+[2025-09-06 00:13:08] [Rank 0] step:2861/10000 train_time:138151ms step_avg:48.29ms
+[2025-09-06 00:13:09] [Rank 0] step:2881/10000 train_time:138889ms step_avg:48.21ms
+[2025-09-06 00:13:10] [Rank 0] step:2901/10000 train_time:139628ms step_avg:48.13ms
+[2025-09-06 00:13:11] [Rank 0] step:2921/10000 train_time:140367ms step_avg:48.05ms
+[2025-09-06 00:13:11] [Rank 0] step:2941/10000 train_time:141109ms step_avg:47.98ms
+[2025-09-06 00:13:12] [Rank 0] step:2961/10000 train_time:141848ms step_avg:47.91ms
+[2025-09-06 00:13:13] [Rank 0] step:2981/10000 train_time:142587ms step_avg:47.83ms
+[2025-09-06 00:13:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:13:14] [Rank 0] PRINT: step:3000/10000 train_loss:2.6985 val_loss:2.6305 train_time:143407ms step_avg:47.80ms
+[2025-09-06 00:13:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:13:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:14:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:14:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:14:36] [Rank 0] Total Loss: 4.8348
+[2025-09-06 00:14:36] [Rank 0] Total FTA (Unweighted): 0.1963
+[2025-09-06 00:14:36] [Rank 0] Total FTA (Weighted): 0.1963
+[2025-09-06 00:14:36] [Rank 0] Group 0 Loss: 3.1919
+[2025-09-06 00:14:36] [Rank 0] Group 1 Loss: 3.1485
+[2025-09-06 00:14:36] [Rank 0] Group 2 Loss: 3.2774
+[2025-09-06 00:14:36] [Rank 0] Group 3 Loss: 3.6932
+[2025-09-06 00:14:36] [Rank 0] Group 4 Loss: 4.2786
+[2025-09-06 00:14:36] [Rank 0] Group 5 Loss: 4.7680
+[2025-09-06 00:14:36] [Rank 0] Group 6 Loss: 5.0844
+[2025-09-06 00:14:36] [Rank 0] Group 7 Loss: 5.1930
+[2025-09-06 00:14:36] [Rank 0] Group 8 Loss: 5.4672
+[2025-09-06 00:14:36] [Rank 0] Group 9 Loss: 5.6121
+[2025-09-06 00:14:36] [Rank 0] Group 10 Loss: 5.6092
+[2025-09-06 00:14:36] [Rank 0] Group 11 Loss: 5.6688
+[2025-09-06 00:14:36] [Rank 0] Group 12 Loss: 5.5593
+[2025-09-06 00:14:36] [Rank 0] Group 13 Loss: 5.5724
+[2025-09-06 00:14:36] [Rank 0] Group 14 Loss: 5.6429
+[2025-09-06 00:14:36] [Rank 0] Group 15 Loss: 5.5901
+[2025-09-06 00:14:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:14:36] [Rank 0] Group 1 FTA: 0.3400
+[2025-09-06 00:14:36] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:14:36] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:14:36] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 00:14:36] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 00:14:36] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 00:14:36] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 00:14:36] [Rank 0] Group 8 FTA: 0.1800
+[2025-09-06 00:14:36] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 00:14:36] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 00:14:36] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 00:14:36] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 00:14:36] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 00:14:36] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:14:36] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:14:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:14:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:14:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:14:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:14:38] [Rank 0] step:3001/10000 train_time:143416ms step_avg:47.79ms
+[2025-09-06 00:14:38] [Rank 0] step:3021/10000 train_time:144095ms step_avg:47.70ms
+[2025-09-06 00:14:39] [Rank 0] step:3041/10000 train_time:144833ms step_avg:47.63ms
+[2025-09-06 00:14:40] [Rank 0] step:3061/10000 train_time:145572ms step_avg:47.56ms
+[2025-09-06 00:14:41] [Rank 0] step:3081/10000 train_time:146450ms step_avg:47.53ms
+[2025-09-06 00:14:41] [Rank 0] step:3101/10000 train_time:147190ms step_avg:47.47ms
+[2025-09-06 00:14:42] [Rank 0] step:3121/10000 train_time:147927ms step_avg:47.40ms
+[2025-09-06 00:14:43] [Rank 0] step:3141/10000 train_time:148825ms step_avg:47.38ms
+[2025-09-06 00:14:44] [Rank 0] step:3161/10000 train_time:149564ms step_avg:47.32ms
+[2025-09-06 00:14:45] [Rank 0] step:3181/10000 train_time:150303ms step_avg:47.25ms
+[2025-09-06 00:14:45] [Rank 0] step:3201/10000 train_time:151040ms step_avg:47.19ms
+[2025-09-06 00:14:46] [Rank 0] step:3221/10000 train_time:151778ms step_avg:47.12ms
+[2025-09-06 00:14:47] [Rank 0] step:3241/10000 train_time:152516ms step_avg:47.06ms
+[2025-09-06 00:14:47] [Rank 0] step:3261/10000 train_time:153254ms step_avg:47.00ms
+[2025-09-06 00:14:48] [Rank 0] step:3281/10000 train_time:153993ms step_avg:46.93ms
+[2025-09-06 00:14:49] [Rank 0] step:3301/10000 train_time:154731ms step_avg:46.87ms
+[2025-09-06 00:14:50] [Rank 0] step:3321/10000 train_time:155469ms step_avg:46.81ms
+[2025-09-06 00:14:50] [Rank 0] step:3341/10000 train_time:156208ms step_avg:46.75ms
+[2025-09-06 00:14:51] [Rank 0] step:3361/10000 train_time:156947ms step_avg:46.70ms
+[2025-09-06 00:14:52] [Rank 0] step:3381/10000 train_time:157685ms step_avg:46.64ms
+[2025-09-06 00:14:53] [Rank 0] step:3401/10000 train_time:158424ms step_avg:46.58ms
+[2025-09-06 00:14:53] [Rank 0] step:3421/10000 train_time:159163ms step_avg:46.53ms
+[2025-09-06 00:14:54] [Rank 0] step:3441/10000 train_time:159901ms step_avg:46.47ms
+[2025-09-06 00:14:55] [Rank 0] step:3461/10000 train_time:160639ms step_avg:46.41ms
+[2025-09-06 00:14:56] [Rank 0] step:3481/10000 train_time:161378ms step_avg:46.36ms
+[2025-09-06 00:14:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:14:57] [Rank 0] PRINT: step:3500/10000 train_loss:2.5912 val_loss:2.5343 train_time:162197ms step_avg:46.34ms
+[2025-09-06 00:14:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:14:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:16:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:16:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:16:18] [Rank 0] Total Loss: 4.7964
+[2025-09-06 00:16:18] [Rank 0] Total FTA (Unweighted): 0.2100
+[2025-09-06 00:16:18] [Rank 0] Total FTA (Weighted): 0.2100
+[2025-09-06 00:16:18] [Rank 0] Group 0 Loss: 3.2334
+[2025-09-06 00:16:18] [Rank 0] Group 1 Loss: 3.1507
+[2025-09-06 00:16:18] [Rank 0] Group 2 Loss: 3.2784
+[2025-09-06 00:16:18] [Rank 0] Group 3 Loss: 3.6382
+[2025-09-06 00:16:18] [Rank 0] Group 4 Loss: 4.2083
+[2025-09-06 00:16:18] [Rank 0] Group 5 Loss: 4.6999
+[2025-09-06 00:16:18] [Rank 0] Group 6 Loss: 5.0185
+[2025-09-06 00:16:18] [Rank 0] Group 7 Loss: 5.1398
+[2025-09-06 00:16:18] [Rank 0] Group 8 Loss: 5.4054
+[2025-09-06 00:16:18] [Rank 0] Group 9 Loss: 5.5665
+[2025-09-06 00:16:18] [Rank 0] Group 10 Loss: 5.5755
+[2025-09-06 00:16:18] [Rank 0] Group 11 Loss: 5.6282
+[2025-09-06 00:16:18] [Rank 0] Group 12 Loss: 5.5417
+[2025-09-06 00:16:19] [Rank 0] Group 13 Loss: 5.5390
+[2025-09-06 00:16:19] [Rank 0] Group 14 Loss: 5.5883
+[2025-09-06 00:16:19] [Rank 0] Group 15 Loss: 5.5303
+[2025-09-06 00:16:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:16:19] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 00:16:19] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:16:19] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:16:19] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-06 00:16:19] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 00:16:19] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 00:16:19] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 00:16:19] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 00:16:19] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 00:16:19] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 00:16:19] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-06 00:16:19] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 00:16:19] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 00:16:19] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-06 00:16:19] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:16:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:16:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:16:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:16:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:16:20] [Rank 0] step:3501/10000 train_time:162206ms step_avg:46.33ms
+[2025-09-06 00:16:21] [Rank 0] step:3521/10000 train_time:162892ms step_avg:46.26ms
+[2025-09-06 00:16:22] [Rank 0] step:3541/10000 train_time:163631ms step_avg:46.21ms
+[2025-09-06 00:16:22] [Rank 0] step:3561/10000 train_time:164370ms step_avg:46.16ms
+[2025-09-06 00:16:23] [Rank 0] step:3581/10000 train_time:165108ms step_avg:46.11ms
+[2025-09-06 00:16:24] [Rank 0] step:3601/10000 train_time:165846ms step_avg:46.06ms
+[2025-09-06 00:16:24] [Rank 0] step:3621/10000 train_time:166585ms step_avg:46.01ms
+[2025-09-06 00:16:26] [Rank 0] step:3641/10000 train_time:167931ms step_avg:46.12ms
+[2025-09-06 00:16:27] [Rank 0] step:3661/10000 train_time:168669ms step_avg:46.07ms
+[2025-09-06 00:16:27] [Rank 0] step:3681/10000 train_time:169407ms step_avg:46.02ms
+[2025-09-06 00:16:28] [Rank 0] step:3701/10000 train_time:170146ms step_avg:45.97ms
+[2025-09-06 00:16:29] [Rank 0] step:3721/10000 train_time:170884ms step_avg:45.92ms
+[2025-09-06 00:16:30] [Rank 0] step:3741/10000 train_time:171622ms step_avg:45.88ms
+[2025-09-06 00:16:30] [Rank 0] step:3761/10000 train_time:172359ms step_avg:45.83ms
+[2025-09-06 00:16:31] [Rank 0] step:3781/10000 train_time:173097ms step_avg:45.78ms
+[2025-09-06 00:16:32] [Rank 0] step:3801/10000 train_time:173836ms step_avg:45.73ms
+[2025-09-06 00:16:32] [Rank 0] step:3821/10000 train_time:174574ms step_avg:45.69ms
+[2025-09-06 00:16:33] [Rank 0] step:3841/10000 train_time:175313ms step_avg:45.64ms
+[2025-09-06 00:16:34] [Rank 0] step:3861/10000 train_time:176052ms step_avg:45.60ms
+[2025-09-06 00:16:35] [Rank 0] step:3881/10000 train_time:176791ms step_avg:45.55ms
+[2025-09-06 00:16:35] [Rank 0] step:3901/10000 train_time:177529ms step_avg:45.51ms
+[2025-09-06 00:16:36] [Rank 0] step:3921/10000 train_time:178268ms step_avg:45.46ms
+[2025-09-06 00:16:37] [Rank 0] step:3941/10000 train_time:179007ms step_avg:45.42ms
+[2025-09-06 00:16:38] [Rank 0] step:3961/10000 train_time:179746ms step_avg:45.38ms
+[2025-09-06 00:16:38] [Rank 0] step:3981/10000 train_time:180485ms step_avg:45.34ms
+[2025-09-06 00:16:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:16:40] [Rank 0] PRINT: step:4000/10000 train_loss:2.5077 val_loss:2.4681 train_time:181305ms step_avg:45.33ms
+[2025-09-06 00:16:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:16:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:18:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:18:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:18:02] [Rank 0] Total Loss: 4.7653
+[2025-09-06 00:18:02] [Rank 0] Total FTA (Unweighted): 0.2313
+[2025-09-06 00:18:02] [Rank 0] Total FTA (Weighted): 0.2313
+[2025-09-06 00:18:02] [Rank 0] Group 0 Loss: 3.2597
+[2025-09-06 00:18:02] [Rank 0] Group 1 Loss: 3.1533
+[2025-09-06 00:18:02] [Rank 0] Group 2 Loss: 3.2863
+[2025-09-06 00:18:02] [Rank 0] Group 3 Loss: 3.7203
+[2025-09-06 00:18:02] [Rank 0] Group 4 Loss: 4.1325
+[2025-09-06 00:18:02] [Rank 0] Group 5 Loss: 4.6601
+[2025-09-06 00:18:02] [Rank 0] Group 6 Loss: 4.9498
+[2025-09-06 00:18:02] [Rank 0] Group 7 Loss: 5.0979
+[2025-09-06 00:18:02] [Rank 0] Group 8 Loss: 5.3623
+[2025-09-06 00:18:02] [Rank 0] Group 9 Loss: 5.4884
+[2025-09-06 00:18:02] [Rank 0] Group 10 Loss: 5.5279
+[2025-09-06 00:18:02] [Rank 0] Group 11 Loss: 5.5780
+[2025-09-06 00:18:02] [Rank 0] Group 12 Loss: 5.4947
+[2025-09-06 00:18:02] [Rank 0] Group 13 Loss: 5.4958
+[2025-09-06 00:18:02] [Rank 0] Group 14 Loss: 5.5279
+[2025-09-06 00:18:02] [Rank 0] Group 15 Loss: 5.5094
+[2025-09-06 00:18:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:18:02] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 00:18:02] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 00:18:02] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:18:02] [Rank 0] Group 4 FTA: 0.2100
+[2025-09-06 00:18:02] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-06 00:18:02] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 00:18:02] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 00:18:02] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 00:18:02] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 00:18:02] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-06 00:18:02] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-06 00:18:02] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 00:18:02] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 00:18:02] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 00:18:02] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:18:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:18:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:18:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:18:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:18:03] [Rank 0] step:4001/10000 train_time:181314ms step_avg:45.32ms
+[2025-09-06 00:18:05] [Rank 0] step:4021/10000 train_time:182595ms step_avg:45.41ms
+[2025-09-06 00:18:05] [Rank 0] step:4041/10000 train_time:183333ms step_avg:45.37ms
+[2025-09-06 00:18:06] [Rank 0] step:4061/10000 train_time:184072ms step_avg:45.33ms
+[2025-09-06 00:18:07] [Rank 0] step:4081/10000 train_time:184810ms step_avg:45.29ms
+[2025-09-06 00:18:08] [Rank 0] step:4101/10000 train_time:185549ms step_avg:45.24ms
+[2025-09-06 00:18:08] [Rank 0] step:4121/10000 train_time:186288ms step_avg:45.20ms
+[2025-09-06 00:18:09] [Rank 0] step:4141/10000 train_time:187027ms step_avg:45.16ms
+[2025-09-06 00:18:10] [Rank 0] step:4161/10000 train_time:187765ms step_avg:45.12ms
+[2025-09-06 00:18:11] [Rank 0] step:4181/10000 train_time:188504ms step_avg:45.09ms
+[2025-09-06 00:18:11] [Rank 0] step:4201/10000 train_time:189243ms step_avg:45.05ms
+[2025-09-06 00:18:12] [Rank 0] step:4221/10000 train_time:189981ms step_avg:45.01ms
+[2025-09-06 00:18:13] [Rank 0] step:4241/10000 train_time:190720ms step_avg:44.97ms
+[2025-09-06 00:18:14] [Rank 0] step:4261/10000 train_time:191458ms step_avg:44.93ms
+[2025-09-06 00:18:14] [Rank 0] step:4281/10000 train_time:192198ms step_avg:44.90ms
+[2025-09-06 00:18:15] [Rank 0] step:4301/10000 train_time:192936ms step_avg:44.86ms
+[2025-09-06 00:18:16] [Rank 0] step:4321/10000 train_time:193675ms step_avg:44.82ms
+[2025-09-06 00:18:16] [Rank 0] step:4341/10000 train_time:194414ms step_avg:44.79ms
+[2025-09-06 00:18:17] [Rank 0] step:4361/10000 train_time:195152ms step_avg:44.75ms
+[2025-09-06 00:18:18] [Rank 0] step:4381/10000 train_time:195891ms step_avg:44.71ms
+[2025-09-06 00:18:19] [Rank 0] step:4401/10000 train_time:196630ms step_avg:44.68ms
+[2025-09-06 00:18:19] [Rank 0] step:4421/10000 train_time:197368ms step_avg:44.64ms
+[2025-09-06 00:18:20] [Rank 0] step:4441/10000 train_time:198106ms step_avg:44.61ms
+[2025-09-06 00:18:21] [Rank 0] step:4461/10000 train_time:198845ms step_avg:44.57ms
+[2025-09-06 00:18:22] [Rank 0] step:4481/10000 train_time:199584ms step_avg:44.54ms
+[2025-09-06 00:18:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:18:23] [Rank 0] PRINT: step:4500/10000 train_loss:2.4450 val_loss:2.4053 train_time:200403ms step_avg:44.53ms
+[2025-09-06 00:18:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:18:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:19:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:19:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:19:45] [Rank 0] Total Loss: 4.7121
+[2025-09-06 00:19:45] [Rank 0] Total FTA (Unweighted): 0.2575
+[2025-09-06 00:19:45] [Rank 0] Total FTA (Weighted): 0.2575
+[2025-09-06 00:19:45] [Rank 0] Group 0 Loss: 3.2393
+[2025-09-06 00:19:45] [Rank 0] Group 1 Loss: 3.1388
+[2025-09-06 00:19:45] [Rank 0] Group 2 Loss: 3.2339
+[2025-09-06 00:19:45] [Rank 0] Group 3 Loss: 3.6684
+[2025-09-06 00:19:45] [Rank 0] Group 4 Loss: 4.0666
+[2025-09-06 00:19:45] [Rank 0] Group 5 Loss: 4.5797
+[2025-09-06 00:19:45] [Rank 0] Group 6 Loss: 4.8933
+[2025-09-06 00:19:45] [Rank 0] Group 7 Loss: 5.0240
+[2025-09-06 00:19:45] [Rank 0] Group 8 Loss: 5.3193
+[2025-09-06 00:19:45] [Rank 0] Group 9 Loss: 5.4452
+[2025-09-06 00:19:45] [Rank 0] Group 10 Loss: 5.4793
+[2025-09-06 00:19:45] [Rank 0] Group 11 Loss: 5.5240
+[2025-09-06 00:19:45] [Rank 0] Group 12 Loss: 5.4294
+[2025-09-06 00:19:45] [Rank 0] Group 13 Loss: 5.4439
+[2025-09-06 00:19:45] [Rank 0] Group 14 Loss: 5.4737
+[2025-09-06 00:19:45] [Rank 0] Group 15 Loss: 5.4348
+[2025-09-06 00:19:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:19:45] [Rank 0] Group 1 FTA: 0.8700
+[2025-09-06 00:19:45] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 00:19:45] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:19:45] [Rank 0] Group 4 FTA: 0.2100
+[2025-09-06 00:19:45] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-06 00:19:45] [Rank 0] Group 6 FTA: 0.1700
+[2025-09-06 00:19:45] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-06 00:19:45] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 00:19:45] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 00:19:45] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-06 00:19:45] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-06 00:19:45] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 00:19:45] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 00:19:45] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:19:45] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:19:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:19:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:19:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:19:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:19:46] [Rank 0] step:4501/10000 train_time:200412ms step_avg:44.53ms
+[2025-09-06 00:19:47] [Rank 0] step:4521/10000 train_time:201081ms step_avg:44.48ms
+[2025-09-06 00:19:48] [Rank 0] step:4541/10000 train_time:201819ms step_avg:44.44ms
+[2025-09-06 00:19:48] [Rank 0] step:4561/10000 train_time:202558ms step_avg:44.41ms
+[2025-09-06 00:19:49] [Rank 0] step:4581/10000 train_time:203298ms step_avg:44.38ms
+[2025-09-06 00:19:50] [Rank 0] step:4601/10000 train_time:204035ms step_avg:44.35ms
+[2025-09-06 00:19:51] [Rank 0] step:4621/10000 train_time:204773ms step_avg:44.31ms
+[2025-09-06 00:19:51] [Rank 0] step:4641/10000 train_time:205510ms step_avg:44.28ms
+[2025-09-06 00:19:52] [Rank 0] step:4661/10000 train_time:206248ms step_avg:44.25ms
+[2025-09-06 00:19:53] [Rank 0] step:4681/10000 train_time:206986ms step_avg:44.22ms
+[2025-09-06 00:19:54] [Rank 0] step:4701/10000 train_time:207724ms step_avg:44.19ms
+[2025-09-06 00:19:54] [Rank 0] step:4721/10000 train_time:208463ms step_avg:44.16ms
+[2025-09-06 00:19:55] [Rank 0] step:4741/10000 train_time:209203ms step_avg:44.13ms
+[2025-09-06 00:19:56] [Rank 0] step:4761/10000 train_time:209941ms step_avg:44.10ms
+[2025-09-06 00:19:57] [Rank 0] step:4781/10000 train_time:210832ms step_avg:44.10ms
+[2025-09-06 00:19:57] [Rank 0] step:4801/10000 train_time:211570ms step_avg:44.07ms
+[2025-09-06 00:19:58] [Rank 0] step:4821/10000 train_time:212308ms step_avg:44.04ms
+[2025-09-06 00:19:59] [Rank 0] step:4841/10000 train_time:213503ms step_avg:44.10ms
+[2025-09-06 00:20:00] [Rank 0] step:4861/10000 train_time:214241ms step_avg:44.07ms
+[2025-09-06 00:20:01] [Rank 0] step:4881/10000 train_time:214980ms step_avg:44.04ms
+[2025-09-06 00:20:02] [Rank 0] step:4901/10000 train_time:215718ms step_avg:44.02ms
+[2025-09-06 00:20:02] [Rank 0] step:4921/10000 train_time:216458ms step_avg:43.99ms
+[2025-09-06 00:20:03] [Rank 0] step:4941/10000 train_time:217196ms step_avg:43.96ms
+[2025-09-06 00:20:04] [Rank 0] step:4961/10000 train_time:217936ms step_avg:43.93ms
+[2025-09-06 00:20:05] [Rank 0] step:4981/10000 train_time:218674ms step_avg:43.90ms
+[2025-09-06 00:20:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:20:06] [Rank 0] PRINT: step:5000/10000 train_loss:2.3887 val_loss:2.3576 train_time:219493ms step_avg:43.90ms
+[2025-09-06 00:20:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:20:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:21:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:21:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:21:28] [Rank 0] Total Loss: 4.6059
+[2025-09-06 00:21:28] [Rank 0] Total FTA (Unweighted): 0.2656
+[2025-09-06 00:21:28] [Rank 0] Total FTA (Weighted): 0.2656
+[2025-09-06 00:21:28] [Rank 0] Group 0 Loss: 3.1444
+[2025-09-06 00:21:28] [Rank 0] Group 1 Loss: 3.1331
+[2025-09-06 00:21:28] [Rank 0] Group 2 Loss: 3.1538
+[2025-09-06 00:21:28] [Rank 0] Group 3 Loss: 3.5242
+[2025-09-06 00:21:28] [Rank 0] Group 4 Loss: 3.9922
+[2025-09-06 00:21:28] [Rank 0] Group 5 Loss: 4.4636
+[2025-09-06 00:21:28] [Rank 0] Group 6 Loss: 4.7475
+[2025-09-06 00:21:28] [Rank 0] Group 7 Loss: 4.9043
+[2025-09-06 00:21:28] [Rank 0] Group 8 Loss: 5.2103
+[2025-09-06 00:21:28] [Rank 0] Group 9 Loss: 5.3181
+[2025-09-06 00:21:28] [Rank 0] Group 10 Loss: 5.3528
+[2025-09-06 00:21:28] [Rank 0] Group 11 Loss: 5.3810
+[2025-09-06 00:21:28] [Rank 0] Group 12 Loss: 5.3266
+[2025-09-06 00:21:28] [Rank 0] Group 13 Loss: 5.3265
+[2025-09-06 00:21:28] [Rank 0] Group 14 Loss: 5.3902
+[2025-09-06 00:21:28] [Rank 0] Group 15 Loss: 5.3265
+[2025-09-06 00:21:28] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:21:28] [Rank 0] Group 1 FTA: 0.8700
+[2025-09-06 00:21:28] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 00:21:28] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:21:28] [Rank 0] Group 4 FTA: 0.2400
+[2025-09-06 00:21:28] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-06 00:21:28] [Rank 0] Group 6 FTA: 0.2100
+[2025-09-06 00:21:28] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:21:28] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:21:28] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 00:21:28] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-06 00:21:28] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-06 00:21:28] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 00:21:28] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 00:21:28] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:21:28] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 00:21:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:21:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:21:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:21:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:21:29] [Rank 0] step:5001/10000 train_time:219502ms step_avg:43.89ms
+[2025-09-06 00:21:30] [Rank 0] step:5021/10000 train_time:220166ms step_avg:43.85ms
+[2025-09-06 00:21:31] [Rank 0] step:5041/10000 train_time:220902ms step_avg:43.82ms
+[2025-09-06 00:21:32] [Rank 0] step:5061/10000 train_time:221641ms step_avg:43.79ms
+[2025-09-06 00:21:32] [Rank 0] step:5081/10000 train_time:222379ms step_avg:43.77ms
+[2025-09-06 00:21:33] [Rank 0] step:5101/10000 train_time:223118ms step_avg:43.74ms
+[2025-09-06 00:21:34] [Rank 0] step:5121/10000 train_time:223856ms step_avg:43.71ms
+[2025-09-06 00:21:35] [Rank 0] step:5141/10000 train_time:224595ms step_avg:43.69ms
+[2025-09-06 00:21:35] [Rank 0] step:5161/10000 train_time:225334ms step_avg:43.66ms
+[2025-09-06 00:21:36] [Rank 0] step:5181/10000 train_time:226073ms step_avg:43.64ms
+[2025-09-06 00:21:37] [Rank 0] step:5201/10000 train_time:226812ms step_avg:43.61ms
+[2025-09-06 00:21:38] [Rank 0] step:5221/10000 train_time:227550ms step_avg:43.58ms
+[2025-09-06 00:21:38] [Rank 0] step:5241/10000 train_time:228289ms step_avg:43.56ms
+[2025-09-06 00:21:39] [Rank 0] step:5261/10000 train_time:229028ms step_avg:43.53ms
+[2025-09-06 00:21:40] [Rank 0] step:5281/10000 train_time:229766ms step_avg:43.51ms
+[2025-09-06 00:21:41] [Rank 0] step:5301/10000 train_time:230505ms step_avg:43.48ms
+[2025-09-06 00:21:41] [Rank 0] step:5321/10000 train_time:231243ms step_avg:43.46ms
+[2025-09-06 00:21:42] [Rank 0] step:5341/10000 train_time:231981ms step_avg:43.43ms
+[2025-09-06 00:21:43] [Rank 0] step:5361/10000 train_time:232720ms step_avg:43.41ms
+[2025-09-06 00:21:44] [Rank 0] step:5381/10000 train_time:233458ms step_avg:43.39ms
+[2025-09-06 00:21:44] [Rank 0] step:5401/10000 train_time:234195ms step_avg:43.36ms
+[2025-09-06 00:21:45] [Rank 0] step:5421/10000 train_time:234933ms step_avg:43.34ms
+[2025-09-06 00:21:46] [Rank 0] step:5441/10000 train_time:235672ms step_avg:43.31ms
+[2025-09-06 00:21:46] [Rank 0] step:5461/10000 train_time:236410ms step_avg:43.29ms
+[2025-09-06 00:21:47] [Rank 0] step:5481/10000 train_time:237148ms step_avg:43.27ms
+[2025-09-06 00:21:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:21:48] [Rank 0] PRINT: step:5500/10000 train_loss:2.3466 val_loss:2.3199 train_time:237967ms step_avg:43.27ms
+[2025-09-06 00:21:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:21:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:23:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:23:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:23:09] [Rank 0] Total Loss: 4.6059
+[2025-09-06 00:23:09] [Rank 0] Total FTA (Unweighted): 0.2781
+[2025-09-06 00:23:09] [Rank 0] Total FTA (Weighted): 0.2781
+[2025-09-06 00:23:09] [Rank 0] Group 0 Loss: 3.2220
+[2025-09-06 00:23:09] [Rank 0] Group 1 Loss: 3.1292
+[2025-09-06 00:23:09] [Rank 0] Group 2 Loss: 3.1692
+[2025-09-06 00:23:09] [Rank 0] Group 3 Loss: 3.5512
+[2025-09-06 00:23:09] [Rank 0] Group 4 Loss: 3.9889
+[2025-09-06 00:23:09] [Rank 0] Group 5 Loss: 4.4666
+[2025-09-06 00:23:09] [Rank 0] Group 6 Loss: 4.7398
+[2025-09-06 00:23:09] [Rank 0] Group 7 Loss: 4.9033
+[2025-09-06 00:23:09] [Rank 0] Group 8 Loss: 5.1974
+[2025-09-06 00:23:09] [Rank 0] Group 9 Loss: 5.2965
+[2025-09-06 00:23:10] [Rank 0] Group 10 Loss: 5.3254
+[2025-09-06 00:23:10] [Rank 0] Group 11 Loss: 5.3930
+[2025-09-06 00:23:10] [Rank 0] Group 12 Loss: 5.3353
+[2025-09-06 00:23:10] [Rank 0] Group 13 Loss: 5.3017
+[2025-09-06 00:23:10] [Rank 0] Group 14 Loss: 5.3617
+[2025-09-06 00:23:10] [Rank 0] Group 15 Loss: 5.3129
+[2025-09-06 00:23:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:23:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:23:10] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 00:23:10] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:23:10] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:23:10] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 00:23:10] [Rank 0] Group 6 FTA: 0.2500
+[2025-09-06 00:23:10] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:23:10] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:23:10] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 00:23:10] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 00:23:10] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-06 00:23:10] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-06 00:23:10] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 00:23:10] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 00:23:10] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-06 00:23:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:23:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:23:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:23:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:23:11] [Rank 0] step:5501/10000 train_time:237976ms step_avg:43.26ms
+[2025-09-06 00:23:12] [Rank 0] step:5521/10000 train_time:238649ms step_avg:43.23ms
+[2025-09-06 00:23:13] [Rank 0] step:5541/10000 train_time:239387ms step_avg:43.20ms
+[2025-09-06 00:23:13] [Rank 0] step:5561/10000 train_time:240126ms step_avg:43.18ms
+[2025-09-06 00:23:14] [Rank 0] step:5581/10000 train_time:240864ms step_avg:43.16ms
+[2025-09-06 00:23:15] [Rank 0] step:5601/10000 train_time:241602ms step_avg:43.14ms
+[2025-09-06 00:23:16] [Rank 0] step:5621/10000 train_time:242341ms step_avg:43.11ms
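Editor's note: the "Total" figures in each detailed-evaluation block are consistent with a plain mean over the 16 groups; the aggregation rule below is inferred from the logged numbers, not taken from the script. A quick check against the step-5500 block above:

```python
# Values copied from the step-5500 evaluation block above.
group_losses = [3.2220, 3.1292, 3.1692, 3.5512, 3.9889, 4.4666, 4.7398, 4.9033,
                5.1974, 5.2965, 5.3254, 5.3930, 5.3353, 5.3017, 5.3617, 5.3129]
group_ftas = [1.0000, 1.0000, 0.3100, 0.1700, 0.2500, 0.2400, 0.2500, 0.1300,
              0.2200, 0.1300, 0.1700, 0.1300, 0.1400, 0.1500, 0.1100, 0.0500]

# Unweighted means reproduce the logged totals to four decimals.
print(round(sum(group_losses) / len(group_losses), 4))  # 4.6059 -> Total Loss
print(round(sum(group_ftas) / len(group_ftas), 4))      # 0.2781 -> Total FTA
```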
+[2025-09-06 00:23:17] [Rank 0] step:5641/10000 train_time:243678ms step_avg:43.20ms
+[2025-09-06 00:23:18] [Rank 0] step:5661/10000 train_time:244415ms step_avg:43.18ms
+[2025-09-06 00:23:18] [Rank 0] step:5681/10000 train_time:245153ms step_avg:43.15ms
+[2025-09-06 00:23:19] [Rank 0] step:5701/10000 train_time:245889ms step_avg:43.13ms
+[2025-09-06 00:23:20] [Rank 0] step:5721/10000 train_time:246626ms step_avg:43.11ms
+[2025-09-06 00:23:21] [Rank 0] step:5741/10000 train_time:247364ms step_avg:43.09ms
+[2025-09-06 00:23:21] [Rank 0] step:5761/10000 train_time:248102ms step_avg:43.07ms
+[2025-09-06 00:23:22] [Rank 0] step:5781/10000 train_time:248840ms step_avg:43.04ms
+[2025-09-06 00:23:23] [Rank 0] step:5801/10000 train_time:249579ms step_avg:43.02ms
+[2025-09-06 00:23:24] [Rank 0] step:5821/10000 train_time:250318ms step_avg:43.00ms
+[2025-09-06 00:23:24] [Rank 0] step:5841/10000 train_time:251056ms step_avg:42.98ms
+[2025-09-06 00:23:25] [Rank 0] step:5861/10000 train_time:251795ms step_avg:42.96ms
+[2025-09-06 00:23:26] [Rank 0] step:5881/10000 train_time:252534ms step_avg:42.94ms
+[2025-09-06 00:23:27] [Rank 0] step:5901/10000 train_time:253272ms step_avg:42.92ms
+[2025-09-06 00:23:27] [Rank 0] step:5921/10000 train_time:254010ms step_avg:42.90ms
+[2025-09-06 00:23:28] [Rank 0] step:5941/10000 train_time:254749ms step_avg:42.88ms
+[2025-09-06 00:23:29] [Rank 0] step:5961/10000 train_time:255487ms step_avg:42.86ms
+[2025-09-06 00:23:29] [Rank 0] step:5981/10000 train_time:256226ms step_avg:42.84ms
+[2025-09-06 00:23:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:23:31] [Rank 0] PRINT: step:6000/10000 train_loss:2.3127 val_loss:2.2891 train_time:257045ms step_avg:42.84ms
+[2025-09-06 00:23:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:23:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:24:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:24:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:24:52] [Rank 0] Total Loss: 4.6088
+[2025-09-06 00:24:52] [Rank 0] Total FTA (Unweighted): 0.2856
+[2025-09-06 00:24:52] [Rank 0] Total FTA (Weighted): 0.2856
+[2025-09-06 00:24:52] [Rank 0] Group 0 Loss: 3.2537
+[2025-09-06 00:24:52] [Rank 0] Group 1 Loss: 3.1730
+[2025-09-06 00:24:52] [Rank 0] Group 2 Loss: 3.1792
+[2025-09-06 00:24:52] [Rank 0] Group 3 Loss: 3.5935
+[2025-09-06 00:24:52] [Rank 0] Group 4 Loss: 3.9630
+[2025-09-06 00:24:52] [Rank 0] Group 5 Loss: 4.4476
+[2025-09-06 00:24:52] [Rank 0] Group 6 Loss: 4.7415
+[2025-09-06 00:24:52] [Rank 0] Group 7 Loss: 4.8891
+[2025-09-06 00:24:52] [Rank 0] Group 8 Loss: 5.1847
+[2025-09-06 00:24:52] [Rank 0] Group 9 Loss: 5.2956
+[2025-09-06 00:24:52] [Rank 0] Group 10 Loss: 5.3413
+[2025-09-06 00:24:52] [Rank 0] Group 11 Loss: 5.3752
+[2025-09-06 00:24:52] [Rank 0] Group 12 Loss: 5.3061
+[2025-09-06 00:24:52] [Rank 0] Group 13 Loss: 5.3214
+[2025-09-06 00:24:52] [Rank 0] Group 14 Loss: 5.3617
+[2025-09-06 00:24:52] [Rank 0] Group 15 Loss: 5.3140
+[2025-09-06 00:24:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:24:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:24:52] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 00:24:52] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:24:52] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:24:52] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 00:24:52] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-06 00:24:52] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:24:52] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:24:52] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 00:24:52] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-06 00:24:52] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-06 00:24:52] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-06 00:24:52] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-06 00:24:52] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:24:52] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-06 00:24:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:24:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:24:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:24:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:24:53] [Rank 0] step:6001/10000 train_time:257053ms step_avg:42.84ms
+[2025-09-06 00:24:55] [Rank 0] step:6021/10000 train_time:258339ms step_avg:42.91ms
+[2025-09-06 00:24:55] [Rank 0] step:6041/10000 train_time:259078ms step_avg:42.89ms
+[2025-09-06 00:24:56] [Rank 0] step:6061/10000 train_time:259818ms step_avg:42.87ms
+[2025-09-06 00:24:57] [Rank 0] step:6081/10000 train_time:260555ms step_avg:42.85ms
+[2025-09-06 00:24:58] [Rank 0] step:6101/10000 train_time:261294ms step_avg:42.83ms
+[2025-09-06 00:24:58] [Rank 0] step:6121/10000 train_time:262034ms step_avg:42.81ms
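Editor's note: "Total FTA (Weighted)" and "(Unweighted)" coincide in these blocks. That is what one would expect if the fixed eval set of 1600 samples is split evenly across the 16 groups (100 each, an assumption consistent with the per-group FTAs all moving in 0.01 steps): weighting by group size is then a no-op. Where the two totals differ in the last digit (0.3012 vs 0.3013 at step 8000 below), that is consistent with rounding of the same underlying value. A sketch under that equal-group-size assumption:

```python
# Group FTAs copied from the step-6000 block above; sizes are assumed.
group_sizes = [100] * 16
group_ftas = [1.00, 1.00, 0.31, 0.17, 0.25, 0.24, 0.27, 0.13,
              0.22, 0.14, 0.18, 0.17, 0.14, 0.17, 0.12, 0.06]

unweighted = sum(group_ftas) / len(group_ftas)
weighted = sum(f * n for f, n in zip(group_ftas, group_sizes)) / sum(group_sizes)
print(round(unweighted, 4), round(weighted, 4))  # 0.2856 0.2856, as logged
```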
+[2025-09-06 00:24:59] [Rank 0] step:6141/10000 train_time:262773ms step_avg:42.79ms
+[2025-09-06 00:25:00] [Rank 0] step:6161/10000 train_time:263512ms step_avg:42.77ms
+[2025-09-06 00:25:01] [Rank 0] step:6181/10000 train_time:264252ms step_avg:42.75ms
+[2025-09-06 00:25:01] [Rank 0] step:6201/10000 train_time:264991ms step_avg:42.73ms
+[2025-09-06 00:25:02] [Rank 0] step:6221/10000 train_time:265731ms step_avg:42.72ms
+[2025-09-06 00:25:03] [Rank 0] step:6241/10000 train_time:266470ms step_avg:42.70ms
+[2025-09-06 00:25:04] [Rank 0] step:6261/10000 train_time:267209ms step_avg:42.68ms
+[2025-09-06 00:25:04] [Rank 0] step:6281/10000 train_time:267948ms step_avg:42.66ms
+[2025-09-06 00:25:05] [Rank 0] step:6301/10000 train_time:268687ms step_avg:42.64ms
+[2025-09-06 00:25:06] [Rank 0] step:6321/10000 train_time:269425ms step_avg:42.62ms
+[2025-09-06 00:25:07] [Rank 0] step:6341/10000 train_time:270162ms step_avg:42.61ms
+[2025-09-06 00:25:07] [Rank 0] step:6361/10000 train_time:270900ms step_avg:42.59ms
+[2025-09-06 00:25:08] [Rank 0] step:6381/10000 train_time:271637ms step_avg:42.57ms
+[2025-09-06 00:25:09] [Rank 0] step:6401/10000 train_time:272374ms step_avg:42.55ms
+[2025-09-06 00:25:09] [Rank 0] step:6421/10000 train_time:273112ms step_avg:42.53ms
+[2025-09-06 00:25:10] [Rank 0] step:6441/10000 train_time:273849ms step_avg:42.52ms
+[2025-09-06 00:25:11] [Rank 0] step:6461/10000 train_time:274593ms step_avg:42.50ms
+[2025-09-06 00:25:12] [Rank 0] step:6481/10000 train_time:275332ms step_avg:42.48ms
+[2025-09-06 00:25:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:25:13] [Rank 0] PRINT: step:6500/10000 train_loss:2.2850 val_loss:2.2623 train_time:276264ms step_avg:42.50ms
+[2025-09-06 00:25:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:25:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:26:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:26:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:26:34] [Rank 0] Total Loss: 4.5813
+[2025-09-06 00:26:34] [Rank 0] Total FTA (Unweighted): 0.2944
+[2025-09-06 00:26:34] [Rank 0] Total FTA (Weighted): 0.2944
+[2025-09-06 00:26:34] [Rank 0] Group 0 Loss: 3.2653
+[2025-09-06 00:26:34] [Rank 0] Group 1 Loss: 3.1541
+[2025-09-06 00:26:34] [Rank 0] Group 2 Loss: 3.1866
+[2025-09-06 00:26:34] [Rank 0] Group 3 Loss: 3.5681
+[2025-09-06 00:26:34] [Rank 0] Group 4 Loss: 3.9279
+[2025-09-06 00:26:35] [Rank 0] Group 5 Loss: 4.3922
+[2025-09-06 00:26:35] [Rank 0] Group 6 Loss: 4.6967
+[2025-09-06 00:26:35] [Rank 0] Group 7 Loss: 4.8456
+[2025-09-06 00:26:35] [Rank 0] Group 8 Loss: 5.1552
+[2025-09-06 00:26:35] [Rank 0] Group 9 Loss: 5.2586
+[2025-09-06 00:26:35] [Rank 0] Group 10 Loss: 5.3217
+[2025-09-06 00:26:35] [Rank 0] Group 11 Loss: 5.3415
+[2025-09-06 00:26:35] [Rank 0] Group 12 Loss: 5.2852
+[2025-09-06 00:26:35] [Rank 0] Group 13 Loss: 5.2936
+[2025-09-06 00:26:35] [Rank 0] Group 14 Loss: 5.3246
+[2025-09-06 00:26:35] [Rank 0] Group 15 Loss: 5.2844
+[2025-09-06 00:26:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:26:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:26:35] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 00:26:35] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:26:35] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:26:35] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 00:26:35] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-06 00:26:35] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:26:35] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:26:35] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 00:26:35] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-06 00:26:35] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 00:26:35] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-06 00:26:35] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-06 00:26:35] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-06 00:26:35] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 00:26:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:26:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:26:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:26:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:26:36] [Rank 0] step:6501/10000 train_time:276273ms step_avg:42.50ms
+[2025-09-06 00:26:37] [Rank 0] step:6521/10000 train_time:276957ms step_avg:42.47ms
+[2025-09-06 00:26:38] [Rank 0] step:6541/10000 train_time:277696ms step_avg:42.45ms
+[2025-09-06 00:26:38] [Rank 0] step:6561/10000 train_time:278434ms step_avg:42.44ms
+[2025-09-06 00:26:39] [Rank 0] step:6581/10000 train_time:279172ms step_avg:42.42ms
+[2025-09-06 00:26:40] [Rank 0] step:6601/10000 train_time:279911ms step_avg:42.40ms
+[2025-09-06 00:26:41] [Rank 0] step:6621/10000 train_time:280649ms step_avg:42.39ms
+[2025-09-06 00:26:41] [Rank 0] step:6641/10000 train_time:281388ms step_avg:42.37ms
+[2025-09-06 00:26:42] [Rank 0] step:6661/10000 train_time:282124ms step_avg:42.35ms
+[2025-09-06 00:26:43] [Rank 0] step:6681/10000 train_time:282862ms step_avg:42.34ms
+[2025-09-06 00:26:43] [Rank 0] step:6701/10000 train_time:283599ms step_avg:42.32ms
+[2025-09-06 00:26:44] [Rank 0] step:6721/10000 train_time:284338ms step_avg:42.31ms
+[2025-09-06 00:26:45] [Rank 0] step:6741/10000 train_time:285076ms step_avg:42.29ms
+[2025-09-06 00:26:46] [Rank 0] step:6761/10000 train_time:285814ms step_avg:42.27ms
+[2025-09-06 00:26:46] [Rank 0] step:6781/10000 train_time:286551ms step_avg:42.26ms
+[2025-09-06 00:26:47] [Rank 0] step:6801/10000 train_time:287288ms step_avg:42.24ms
+[2025-09-06 00:26:48] [Rank 0] step:6821/10000 train_time:288028ms step_avg:42.23ms
+[2025-09-06 00:26:49] [Rank 0] step:6841/10000 train_time:289382ms step_avg:42.30ms
+[2025-09-06 00:26:50] [Rank 0] step:6861/10000 train_time:290121ms step_avg:42.29ms
+[2025-09-06 00:26:51] [Rank 0] step:6881/10000 train_time:290860ms step_avg:42.27ms
+[2025-09-06 00:26:51] [Rank 0] step:6901/10000 train_time:291599ms step_avg:42.25ms
+[2025-09-06 00:26:52] [Rank 0] step:6921/10000 train_time:292337ms step_avg:42.24ms
+[2025-09-06 00:26:53] [Rank 0] step:6941/10000 train_time:293075ms step_avg:42.22ms
+[2025-09-06 00:26:54] [Rank 0] step:6961/10000 train_time:293814ms step_avg:42.21ms
+[2025-09-06 00:26:54] [Rank 0] step:6981/10000 train_time:294553ms step_avg:42.19ms
+[2025-09-06 00:26:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
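Editor's note: the step_avg field in the lines above is consistent with cumulative train_time divided by the step index; that bookkeeping rule is inferred from the numbers, not taken from the script source. A one-liner check:

```python
# step_avg as cumulative-time-per-step, checked against a logged line above.
def step_avg(step: int, train_time_ms: int) -> float:
    return train_time_ms / step

print(f"{step_avg(6981, 294553):.2f}ms")  # 42.19ms, matching step:6981
```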
+[2025-09-06 00:26:56] [Rank 0] PRINT: step:7000/10000 train_loss:2.2595 val_loss:2.2408 train_time:295373ms step_avg:42.20ms
+[2025-09-06 00:26:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:26:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:28:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:28:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:28:17] [Rank 0] Total Loss: 4.5429
+[2025-09-06 00:28:17] [Rank 0] Total FTA (Unweighted): 0.3000
+[2025-09-06 00:28:17] [Rank 0] Total FTA (Weighted): 0.3000
+[2025-09-06 00:28:17] [Rank 0] Group 0 Loss: 3.2828
+[2025-09-06 00:28:17] [Rank 0] Group 1 Loss: 3.1328
+[2025-09-06 00:28:17] [Rank 0] Group 2 Loss: 3.1725
+[2025-09-06 00:28:17] [Rank 0] Group 3 Loss: 3.5365
+[2025-09-06 00:28:17] [Rank 0] Group 4 Loss: 3.9080
+[2025-09-06 00:28:17] [Rank 0] Group 5 Loss: 4.3267
+[2025-09-06 00:28:17] [Rank 0] Group 6 Loss: 4.6600
+[2025-09-06 00:28:17] [Rank 0] Group 7 Loss: 4.7987
+[2025-09-06 00:28:17] [Rank 0] Group 8 Loss: 5.0988
+[2025-09-06 00:28:17] [Rank 0] Group 9 Loss: 5.2076
+[2025-09-06 00:28:17] [Rank 0] Group 10 Loss: 5.2693
+[2025-09-06 00:28:17] [Rank 0] Group 11 Loss: 5.2952
+[2025-09-06 00:28:17] [Rank 0] Group 12 Loss: 5.2336
+[2025-09-06 00:28:17] [Rank 0] Group 13 Loss: 5.2423
+[2025-09-06 00:28:17] [Rank 0] Group 14 Loss: 5.2867
+[2025-09-06 00:28:17] [Rank 0] Group 15 Loss: 5.2352
+[2025-09-06 00:28:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:28:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:28:17] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 00:28:17] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:28:17] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:28:17] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 00:28:17] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:28:17] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:28:17] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:28:17] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 00:28:17] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-06 00:28:17] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 00:28:17] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-06 00:28:17] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-06 00:28:17] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-06 00:28:17] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-06 00:28:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:28:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:28:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:28:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:28:18] [Rank 0] step:7001/10000 train_time:295382ms step_avg:42.19ms
+[2025-09-06 00:28:19] [Rank 0] step:7021/10000 train_time:296049ms step_avg:42.17ms
+[2025-09-06 00:28:20] [Rank 0] step:7041/10000 train_time:296788ms step_avg:42.15ms
+[2025-09-06 00:28:21] [Rank 0] step:7061/10000 train_time:297526ms step_avg:42.14ms
+[2025-09-06 00:28:21] [Rank 0] step:7081/10000 train_time:298265ms step_avg:42.12ms
+[2025-09-06 00:28:22] [Rank 0] step:7101/10000 train_time:299003ms step_avg:42.11ms
+[2025-09-06 00:28:23] [Rank 0] step:7121/10000 train_time:299871ms step_avg:42.11ms
+[2025-09-06 00:28:24] [Rank 0] step:7141/10000 train_time:300609ms step_avg:42.10ms
+[2025-09-06 00:28:24] [Rank 0] step:7161/10000 train_time:301347ms step_avg:42.08ms
+[2025-09-06 00:28:25] [Rank 0] step:7181/10000 train_time:302216ms step_avg:42.09ms
+[2025-09-06 00:28:26] [Rank 0] step:7201/10000 train_time:302955ms step_avg:42.07ms
+[2025-09-06 00:28:27] [Rank 0] step:7221/10000 train_time:303694ms step_avg:42.06ms
+[2025-09-06 00:28:27] [Rank 0] step:7241/10000 train_time:304434ms step_avg:42.04ms
+[2025-09-06 00:28:28] [Rank 0] step:7261/10000 train_time:305173ms step_avg:42.03ms
+[2025-09-06 00:28:29] [Rank 0] step:7281/10000 train_time:305911ms step_avg:42.01ms
+[2025-09-06 00:28:30] [Rank 0] step:7301/10000 train_time:306650ms step_avg:42.00ms
+[2025-09-06 00:28:30] [Rank 0] step:7321/10000 train_time:307388ms step_avg:41.99ms
+[2025-09-06 00:28:31] [Rank 0] step:7341/10000 train_time:308127ms step_avg:41.97ms
+[2025-09-06 00:28:32] [Rank 0] step:7361/10000 train_time:308865ms step_avg:41.96ms
+[2025-09-06 00:28:33] [Rank 0] step:7381/10000 train_time:309604ms step_avg:41.95ms
+[2025-09-06 00:28:33] [Rank 0] step:7401/10000 train_time:310343ms step_avg:41.93ms
+[2025-09-06 00:28:34] [Rank 0] step:7421/10000 train_time:311081ms step_avg:41.92ms
+[2025-09-06 00:28:35] [Rank 0] step:7441/10000 train_time:311820ms step_avg:41.91ms
+[2025-09-06 00:28:36] [Rank 0] step:7461/10000 train_time:312558ms step_avg:41.89ms
+[2025-09-06 00:28:36] [Rank 0] step:7481/10000 train_time:313297ms step_avg:41.88ms
+[2025-09-06 00:28:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:28:37] [Rank 0] PRINT: step:7500/10000 train_loss:2.2396 val_loss:2.2221 train_time:314117ms step_avg:41.88ms
+[2025-09-06 00:28:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:28:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:29:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:29:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:29:59] [Rank 0] Total Loss: 4.5458
+[2025-09-06 00:29:59] [Rank 0] Total FTA (Unweighted): 0.2981
+[2025-09-06 00:29:59] [Rank 0] Total FTA (Weighted): 0.2981
+[2025-09-06 00:29:59] [Rank 0] Group 0 Loss: 3.2507
+[2025-09-06 00:29:59] [Rank 0] Group 1 Loss: 3.1346
+[2025-09-06 00:29:59] [Rank 0] Group 2 Loss: 3.1756
+[2025-09-06 00:29:59] [Rank 0] Group 3 Loss: 3.5515
+[2025-09-06 00:29:59] [Rank 0] Group 4 Loss: 3.9056
+[2025-09-06 00:29:59] [Rank 0] Group 5 Loss: 4.3443
+[2025-09-06 00:29:59] [Rank 0] Group 6 Loss: 4.6416
+[2025-09-06 00:29:59] [Rank 0] Group 7 Loss: 4.7995
+[2025-09-06 00:29:59] [Rank 0] Group 8 Loss: 5.1108
+[2025-09-06 00:29:59] [Rank 0] Group 9 Loss: 5.2113
+[2025-09-06 00:29:59] [Rank 0] Group 10 Loss: 5.2696
+[2025-09-06 00:29:59] [Rank 0] Group 11 Loss: 5.3024
+[2025-09-06 00:29:59] [Rank 0] Group 12 Loss: 5.2380
+[2025-09-06 00:29:59] [Rank 0] Group 13 Loss: 5.2687
+[2025-09-06 00:29:59] [Rank 0] Group 14 Loss: 5.2982
+[2025-09-06 00:29:59] [Rank 0] Group 15 Loss: 5.2309
+[2025-09-06 00:29:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:29:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:29:59] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 00:29:59] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:29:59] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:29:59] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 00:29:59] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:29:59] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:29:59] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:29:59] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 00:29:59] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-06 00:29:59] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 00:29:59] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-06 00:29:59] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-06 00:29:59] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 00:29:59] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 00:29:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:30:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:30:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:30:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:30:00] [Rank 0] step:7501/10000 train_time:314126ms step_avg:41.88ms
+[2025-09-06 00:30:01] [Rank 0] step:7521/10000 train_time:314807ms step_avg:41.86ms
+[2025-09-06 00:30:02] [Rank 0] step:7541/10000 train_time:315546ms step_avg:41.84ms
+[2025-09-06 00:30:02] [Rank 0] step:7561/10000 train_time:316285ms step_avg:41.83ms
+[2025-09-06 00:30:03] [Rank 0] step:7581/10000 train_time:317025ms step_avg:41.82ms
+[2025-09-06 00:30:04] [Rank 0] step:7601/10000 train_time:317762ms step_avg:41.81ms
+[2025-09-06 00:30:05] [Rank 0] step:7621/10000 train_time:318501ms step_avg:41.79ms
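Editor's note: the "[✓] ... curve updated and saved" lines record PNGs regenerated at every evaluation. The following is a minimal sketch of how such per-class curves could be rebuilt from the logged values; it is not the training script's own plotting code, and the history dict is hypothetical, scraped from the entries in this log:

```python
import matplotlib.pyplot as plt

# Hypothetical history: {group_id: [(step, fta), ...]} collected from the log.
history = {0: [(5500, 1.00), (6000, 1.00), (6500, 1.00)],
           15: [(5500, 0.05), (6000, 0.06), (6500, 0.09)]}

fig, ax = plt.subplots()
for group, points in sorted(history.items()):
    steps, ftas = zip(*points)
    ax.plot(steps, ftas, label=f"Group {group}")
ax.set_xlabel("step")
ax.set_ylabel("FTA")
ax.legend()
fig.savefig("per_class_acc_curves.png")  # same filename pattern as the log
```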
+[2025-09-06 00:30:06] [Rank 0] step:7641/10000 train_time:319259ms step_avg:41.78ms
+[2025-09-06 00:30:07] [Rank 0] step:7661/10000 train_time:320591ms step_avg:41.85ms
+[2025-09-06 00:30:07] [Rank 0] step:7681/10000 train_time:321329ms step_avg:41.83ms
+[2025-09-06 00:30:08] [Rank 0] step:7701/10000 train_time:322068ms step_avg:41.82ms
+[2025-09-06 00:30:09] [Rank 0] step:7721/10000 train_time:322807ms step_avg:41.81ms
+[2025-09-06 00:30:10] [Rank 0] step:7741/10000 train_time:323545ms step_avg:41.80ms
+[2025-09-06 00:30:10] [Rank 0] step:7761/10000 train_time:324282ms step_avg:41.78ms
+[2025-09-06 00:30:11] [Rank 0] step:7781/10000 train_time:325020ms step_avg:41.77ms
+[2025-09-06 00:30:12] [Rank 0] step:7801/10000 train_time:325758ms step_avg:41.76ms
+[2025-09-06 00:30:13] [Rank 0] step:7821/10000 train_time:326496ms step_avg:41.75ms
+[2025-09-06 00:30:13] [Rank 0] step:7841/10000 train_time:327235ms step_avg:41.73ms
+[2025-09-06 00:30:14] [Rank 0] step:7861/10000 train_time:327974ms step_avg:41.72ms
+[2025-09-06 00:30:15] [Rank 0] step:7881/10000 train_time:328713ms step_avg:41.71ms
+[2025-09-06 00:30:16] [Rank 0] step:7901/10000 train_time:329452ms step_avg:41.70ms
+[2025-09-06 00:30:16] [Rank 0] step:7921/10000 train_time:330191ms step_avg:41.69ms
+[2025-09-06 00:30:17] [Rank 0] step:7941/10000 train_time:330930ms step_avg:41.67ms
+[2025-09-06 00:30:18] [Rank 0] step:7961/10000 train_time:331667ms step_avg:41.66ms
+[2025-09-06 00:30:18] [Rank 0] step:7981/10000 train_time:332405ms step_avg:41.65ms
+[2025-09-06 00:30:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:30:20] [Rank 0] PRINT: step:8000/10000 train_loss:2.2230 val_loss:2.2064 train_time:333224ms step_avg:41.65ms
+[2025-09-06 00:30:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:30:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:31:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:31:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:31:41] [Rank 0] Total Loss: 4.5296
+[2025-09-06 00:31:41] [Rank 0] Total FTA (Unweighted): 0.3012
+[2025-09-06 00:31:41] [Rank 0] Total FTA (Weighted): 0.3013
+[2025-09-06 00:31:41] [Rank 0] Group 0 Loss: 3.2639
+[2025-09-06 00:31:41] [Rank 0] Group 1 Loss: 3.1369
+[2025-09-06 00:31:41] [Rank 0] Group 2 Loss: 3.1608
+[2025-09-06 00:31:41] [Rank 0] Group 3 Loss: 3.5300
+[2025-09-06 00:31:41] [Rank 0] Group 4 Loss: 3.8838
+[2025-09-06 00:31:41] [Rank 0] Group 5 Loss: 4.3220
+[2025-09-06 00:31:41] [Rank 0] Group 6 Loss: 4.6323
+[2025-09-06 00:31:41] [Rank 0] Group 7 Loss: 4.7882
+[2025-09-06 00:31:41] [Rank 0] Group 8 Loss: 5.0826
+[2025-09-06 00:31:41] [Rank 0] Group 9 Loss: 5.1889
+[2025-09-06 00:31:41] [Rank 0] Group 10 Loss: 5.2525
+[2025-09-06 00:31:41] [Rank 0] Group 11 Loss: 5.2800
+[2025-09-06 00:31:41] [Rank 0] Group 12 Loss: 5.2165
+[2025-09-06 00:31:41] [Rank 0] Group 13 Loss: 5.2377
+[2025-09-06 00:31:41] [Rank 0] Group 14 Loss: 5.2824
+[2025-09-06 00:31:41] [Rank 0] Group 15 Loss: 5.2153
+[2025-09-06 00:31:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:31:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:31:41] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 00:31:41] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:31:41] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:31:41] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 00:31:41] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:31:41] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:31:41] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:31:41] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 00:31:41] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-06 00:31:41] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 00:31:41] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-06 00:31:41] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-06 00:31:41] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 00:31:41] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 00:31:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:31:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:31:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:31:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:31:42] [Rank 0] step:8001/10000 train_time:333234ms step_avg:41.65ms
+[2025-09-06 00:31:43] [Rank 0] step:8021/10000 train_time:334114ms step_avg:41.65ms
+[2025-09-06 00:31:44] [Rank 0] step:8041/10000 train_time:334851ms step_avg:41.64ms
+[2025-09-06 00:31:45] [Rank 0] step:8061/10000 train_time:335589ms step_avg:41.63ms
+[2025-09-06 00:31:46] [Rank 0] step:8081/10000 train_time:336328ms step_avg:41.62ms
+[2025-09-06 00:31:46] [Rank 0] step:8101/10000 train_time:337066ms step_avg:41.61ms
+[2025-09-06 00:31:47] [Rank 0] step:8121/10000 train_time:337804ms step_avg:41.60ms
00:31:47] [Rank 0] step:8121/10000 train_time:337804ms step_avg:41.60ms +[2025-09-06 00:31:48] [Rank 0] step:8141/10000 train_time:338542ms step_avg:41.58ms +[2025-09-06 00:31:48] [Rank 0] step:8141/10000 train_time:338542ms step_avg:41.58ms +[2025-09-06 00:31:49] [Rank 0] step:8161/10000 train_time:339281ms step_avg:41.57ms +[2025-09-06 00:31:49] [Rank 0] step:8161/10000 train_time:339281ms step_avg:41.57ms +[2025-09-06 00:31:49] [Rank 0] step:8181/10000 train_time:340021ms step_avg:41.56ms +[2025-09-06 00:31:49] [Rank 0] step:8181/10000 train_time:340021ms step_avg:41.56ms +[2025-09-06 00:31:50] [Rank 0] step:8201/10000 train_time:340759ms step_avg:41.55ms +[2025-09-06 00:31:50] [Rank 0] step:8201/10000 train_time:340759ms step_avg:41.55ms +[2025-09-06 00:31:51] [Rank 0] step:8221/10000 train_time:341499ms step_avg:41.54ms +[2025-09-06 00:31:51] [Rank 0] step:8221/10000 train_time:341499ms step_avg:41.54ms +[2025-09-06 00:31:51] [Rank 0] step:8241/10000 train_time:342237ms step_avg:41.53ms +[2025-09-06 00:31:51] [Rank 0] step:8241/10000 train_time:342237ms step_avg:41.53ms +[2025-09-06 00:31:52] [Rank 0] step:8261/10000 train_time:342975ms step_avg:41.52ms +[2025-09-06 00:31:52] [Rank 0] step:8261/10000 train_time:342975ms step_avg:41.52ms +[2025-09-06 00:31:53] [Rank 0] step:8281/10000 train_time:343714ms step_avg:41.51ms +[2025-09-06 00:31:53] [Rank 0] step:8281/10000 train_time:343714ms step_avg:41.51ms +[2025-09-06 00:31:54] [Rank 0] step:8301/10000 train_time:344452ms step_avg:41.50ms +[2025-09-06 00:31:54] [Rank 0] step:8301/10000 train_time:344452ms step_avg:41.50ms +[2025-09-06 00:31:54] [Rank 0] step:8321/10000 train_time:345192ms step_avg:41.48ms +[2025-09-06 00:31:54] [Rank 0] step:8321/10000 train_time:345192ms step_avg:41.48ms +[2025-09-06 00:31:55] [Rank 0] step:8341/10000 train_time:345931ms step_avg:41.47ms +[2025-09-06 00:31:55] [Rank 0] step:8341/10000 train_time:345931ms step_avg:41.47ms +[2025-09-06 00:31:56] [Rank 0] step:8361/10000 train_time:346669ms step_avg:41.46ms +[2025-09-06 00:31:56] [Rank 0] step:8361/10000 train_time:346669ms step_avg:41.46ms +[2025-09-06 00:31:57] [Rank 0] step:8381/10000 train_time:347408ms step_avg:41.45ms +[2025-09-06 00:31:57] [Rank 0] step:8381/10000 train_time:347408ms step_avg:41.45ms +[2025-09-06 00:31:57] [Rank 0] step:8401/10000 train_time:348147ms step_avg:41.44ms +[2025-09-06 00:31:57] [Rank 0] step:8401/10000 train_time:348147ms step_avg:41.44ms +[2025-09-06 00:31:58] [Rank 0] step:8421/10000 train_time:348885ms step_avg:41.43ms +[2025-09-06 00:31:58] [Rank 0] step:8421/10000 train_time:348885ms step_avg:41.43ms +[2025-09-06 00:31:59] [Rank 0] step:8441/10000 train_time:349623ms step_avg:41.42ms +[2025-09-06 00:31:59] [Rank 0] step:8441/10000 train_time:349623ms step_avg:41.42ms +[2025-09-06 00:32:00] [Rank 0] step:8461/10000 train_time:350362ms step_avg:41.41ms +[2025-09-06 00:32:00] [Rank 0] step:8461/10000 train_time:350362ms step_avg:41.41ms +[2025-09-06 00:32:00] [Rank 0] step:8481/10000 train_time:351100ms step_avg:41.40ms +[2025-09-06 00:32:00] [Rank 0] step:8481/10000 train_time:351100ms step_avg:41.40ms +[2025-09-06 00:32:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 00:32:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
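[Note: the warning above is plain integer arithmetic: 491520 / 65536 = 7.5, so a validation loop that only consumes whole batches runs 7 times and sees 7 * 65536 = 458752 of the 491520 tokens, skipping 32768. A minimal sketch of the presumed check; the floor division is an assumption inferred from the warning text, not confirmed from the script:

    val_tokens = 491520                                  # "val_tokens" in config.json
    val_batch_size = 65536                               # from the warning message
    full_batches = val_tokens // val_batch_size          # 7 (assumed floor division)
    missed = val_tokens - full_batches * val_batch_size  # 32768 tokens "missed"

Choosing val_tokens as a multiple of val_batch_size (e.g. 458752 or 524288) would silence the warning.]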
+[2025-09-06 00:32:02] [Rank 0] PRINT: step:8500/10000 train_loss:2.2085 val_loss:2.1925 train_time:351920ms step_avg:41.40ms
+[2025-09-06 00:32:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:32:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:33:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:33:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:33:23] [Rank 0] Total Loss: 4.5357
+[2025-09-06 00:33:23] [Rank 0] Total FTA (Unweighted): 0.3069
+[2025-09-06 00:33:23] [Rank 0] Total FTA (Weighted): 0.3069
+[2025-09-06 00:33:23] [Rank 0] Group 0 Loss: 3.2908
+[2025-09-06 00:33:23] [Rank 0] Group 1 Loss: 3.1333
+[2025-09-06 00:33:23] [Rank 0] Group 2 Loss: 3.1847
+[2025-09-06 00:33:23] [Rank 0] Group 3 Loss: 3.5564
+[2025-09-06 00:33:23] [Rank 0] Group 4 Loss: 3.8772
+[2025-09-06 00:33:23] [Rank 0] Group 5 Loss: 4.3107
+[2025-09-06 00:33:23] [Rank 0] Group 6 Loss: 4.6383
+[2025-09-06 00:33:23] [Rank 0] Group 7 Loss: 4.7823
+[2025-09-06 00:33:23] [Rank 0] Group 8 Loss: 5.0837
+[2025-09-06 00:33:23] [Rank 0] Group 9 Loss: 5.1917
+[2025-09-06 00:33:23] [Rank 0] Group 10 Loss: 5.2653
+[2025-09-06 00:33:23] [Rank 0] Group 11 Loss: 5.2790
+[2025-09-06 00:33:23] [Rank 0] Group 12 Loss: 5.2245
+[2025-09-06 00:33:23] [Rank 0] Group 13 Loss: 5.2534
+[2025-09-06 00:33:23] [Rank 0] Group 14 Loss: 5.2848
+[2025-09-06 00:33:23] [Rank 0] Group 15 Loss: 5.2151
+[2025-09-06 00:33:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:33:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:33:23] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 00:33:23] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:33:23] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:33:23] [Rank 0] Group 5 FTA: 0.2500
+[2025-09-06 00:33:23] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:33:23] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:33:23] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:33:23] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-06 00:33:23] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 00:33:23] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 00:33:23] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-06 00:33:23] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-06 00:33:23] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 00:33:23] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-06 00:33:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:33:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:33:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:33:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:33:25] [Rank 0] step:8501/10000 train_time:351930ms step_avg:41.40ms
+[2025-09-06 00:33:25] [Rank 0] step:8521/10000 train_time:352613ms step_avg:41.38ms
+[2025-09-06 00:33:26] [Rank 0] step:8541/10000 train_time:353351ms step_avg:41.37ms
+[2025-09-06 00:33:27] [Rank 0] step:8561/10000 train_time:354089ms step_avg:41.36ms
+[2025-09-06 00:33:28] [Rank 0] step:8581/10000 train_time:354828ms step_avg:41.35ms
+[2025-09-06 00:33:28] [Rank 0] step:8601/10000 train_time:355567ms step_avg:41.34ms
+[2025-09-06 00:33:29] [Rank 0] step:8621/10000 train_time:356306ms step_avg:41.33ms
+[2025-09-06 00:33:30] [Rank 0] step:8641/10000 train_time:357045ms step_avg:41.32ms
+[2025-09-06 00:33:31] [Rank 0] step:8661/10000 train_time:357784ms step_avg:41.31ms
+[2025-09-06 00:33:31] [Rank 0] step:8681/10000 train_time:358521ms step_avg:41.30ms
+[2025-09-06 00:33:32] [Rank 0] step:8701/10000 train_time:359261ms step_avg:41.29ms
+[2025-09-06 00:33:33] [Rank 0] step:8721/10000 train_time:359999ms step_avg:41.28ms
+[2025-09-06 00:33:34] [Rank 0] step:8741/10000 train_time:360738ms step_avg:41.27ms
+[2025-09-06 00:33:34] [Rank 0] step:8761/10000 train_time:361477ms step_avg:41.26ms
+[2025-09-06 00:33:35] [Rank 0] step:8781/10000 train_time:362215ms step_avg:41.25ms
+[2025-09-06 00:33:36] [Rank 0] step:8801/10000 train_time:362954ms step_avg:41.24ms
+[2025-09-06 00:33:37] [Rank 0] step:8821/10000 train_time:363693ms step_avg:41.23ms
+[2025-09-06 00:33:37] [Rank 0] step:8841/10000 train_time:364627ms step_avg:41.24ms
+[2025-09-06 00:33:38] [Rank 0] step:8861/10000 train_time:365366ms step_avg:41.23ms
+[2025-09-06 00:33:39] [Rank 0] step:8881/10000 train_time:366295ms step_avg:41.24ms
+[2025-09-06 00:33:40] [Rank 0] step:8901/10000 train_time:367033ms step_avg:41.24ms
+[2025-09-06 00:33:41] [Rank 0] step:8921/10000 train_time:367772ms step_avg:41.23ms
+[2025-09-06 00:33:42] [Rank 0] step:8941/10000 train_time:368653ms step_avg:41.23ms
+[2025-09-06 00:33:42] [Rank 0] step:8961/10000 train_time:369392ms step_avg:41.22ms
+[2025-09-06 00:33:43] [Rank 0] step:8981/10000 train_time:370130ms step_avg:41.21ms
+[2025-09-06 00:33:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
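[Note: step_avg in these lines is consistent with the cumulative train_time divided by the current step index, e.g. 370130 ms / 8981 ≈ 41.21 ms. It drifts downward because the incremental cost late in the run (roughly 37 ms per step between consecutive entries here) sits below the running average, which still carries the more expensive early steps.]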
+[2025-09-06 00:33:44] [Rank 0] PRINT: step:9000/10000 train_loss:2.1946 val_loss:2.1805 train_time:370949ms step_avg:41.22ms
+[2025-09-06 00:33:44] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:33:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:35:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:35:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:35:06] [Rank 0] Total Loss: 4.5306
+[2025-09-06 00:35:06] [Rank 0] Total FTA (Unweighted): 0.3113
+[2025-09-06 00:35:06] [Rank 0] Total FTA (Weighted): 0.3113
+[2025-09-06 00:35:06] [Rank 0] Group 0 Loss: 3.3069
+[2025-09-06 00:35:06] [Rank 0] Group 1 Loss: 3.1407
+[2025-09-06 00:35:06] [Rank 0] Group 2 Loss: 3.1827
+[2025-09-06 00:35:06] [Rank 0] Group 3 Loss: 3.5549
+[2025-09-06 00:35:06] [Rank 0] Group 4 Loss: 3.8865
+[2025-09-06 00:35:06] [Rank 0] Group 5 Loss: 4.3064
+[2025-09-06 00:35:06] [Rank 0] Group 6 Loss: 4.6320
+[2025-09-06 00:35:06] [Rank 0] Group 7 Loss: 4.7697
+[2025-09-06 00:35:06] [Rank 0] Group 8 Loss: 5.0710
+[2025-09-06 00:35:06] [Rank 0] Group 9 Loss: 5.1866
+[2025-09-06 00:35:06] [Rank 0] Group 10 Loss: 5.2480
+[2025-09-06 00:35:06] [Rank 0] Group 11 Loss: 5.2714
+[2025-09-06 00:35:06] [Rank 0] Group 12 Loss: 5.2130
+[2025-09-06 00:35:06] [Rank 0] Group 13 Loss: 5.2454
+[2025-09-06 00:35:06] [Rank 0] Group 14 Loss: 5.2725
+[2025-09-06 00:35:06] [Rank 0] Group 15 Loss: 5.2016
+[2025-09-06 00:35:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:35:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:35:06] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 00:35:06] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:35:06] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:35:06] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-06 00:35:06] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:35:06] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:35:06] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:35:06] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-06 00:35:06] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-06 00:35:06] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 00:35:06] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-06 00:35:06] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-06 00:35:06] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-06 00:35:06] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-06 00:35:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:35:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:35:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:35:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:35:08] [Rank 0] step:9001/10000 train_time:370959ms step_avg:41.21ms
+[2025-09-06 00:35:09] [Rank 0] step:9021/10000 train_time:371643ms step_avg:41.20ms
+[2025-09-06 00:35:10] [Rank 0] step:9041/10000 train_time:372379ms step_avg:41.19ms
+[2025-09-06 00:35:10] [Rank 0] step:9061/10000 train_time:373116ms step_avg:41.18ms
+[2025-09-06 00:35:11] [Rank 0] step:9081/10000 train_time:373853ms step_avg:41.17ms
+[2025-09-06 00:35:12] [Rank 0] step:9101/10000 train_time:374592ms step_avg:41.16ms
+[2025-09-06 00:35:13] [Rank 0] step:9121/10000 train_time:375330ms step_avg:41.15ms
+[2025-09-06 00:35:13] [Rank 0] step:9141/10000 train_time:376068ms step_avg:41.14ms
+[2025-09-06 00:35:14] [Rank 0] step:9161/10000 train_time:376806ms step_avg:41.13ms
+[2025-09-06 00:35:15] [Rank 0] step:9181/10000 train_time:377545ms step_avg:41.12ms
+[2025-09-06 00:35:16] [Rank 0] step:9201/10000 train_time:378284ms step_avg:41.11ms
+[2025-09-06 00:35:16] [Rank 0] step:9221/10000 train_time:379023ms step_avg:41.10ms
+[2025-09-06 00:35:17] [Rank 0] step:9241/10000 train_time:379765ms step_avg:41.10ms
+[2025-09-06 00:35:18] [Rank 0] step:9261/10000 train_time:380504ms step_avg:41.09ms
+[2025-09-06 00:35:19] [Rank 0] step:9281/10000 train_time:381243ms step_avg:41.08ms
+[2025-09-06 00:35:19] [Rank 0] step:9301/10000 train_time:381982ms step_avg:41.07ms
+[2025-09-06 00:35:20] [Rank 0] step:9321/10000 train_time:382719ms step_avg:41.06ms
+[2025-09-06 00:35:21] [Rank 0] step:9341/10000 train_time:383458ms step_avg:41.05ms
+[2025-09-06 00:35:22] [Rank 0] step:9361/10000 train_time:384197ms step_avg:41.04ms
+[2025-09-06 00:35:22] [Rank 0] step:9381/10000 train_time:384979ms step_avg:41.04ms
+[2025-09-06 00:35:23] [Rank 0] step:9401/10000 train_time:385718ms step_avg:41.03ms
+[2025-09-06 00:35:24] [Rank 0] step:9421/10000 train_time:386457ms step_avg:41.02ms
+[2025-09-06 00:35:25] [Rank 0] step:9441/10000 train_time:387197ms step_avg:41.01ms
+[2025-09-06 00:35:25] [Rank 0] step:9461/10000 train_time:387936ms step_avg:41.00ms
+[2025-09-06 00:35:26] [Rank 0] step:9481/10000 train_time:388676ms step_avg:41.00ms
+[2025-09-06 00:35:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
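[Note: the "Fixed-eval set loaded with 1600 samples" count matches per_group_k=100 from config.json times the 16 groups (0-15) reported at each evaluation; fixed_eval_indices.json in this diff stores 100 sample indices per group key. A quick sanity check, assuming the file layout shown earlier in this diff:

    import json
    with open("fixed_eval_indices.json") as f:
        idx = json.load(f)
    # 16 group keys x 100 indices each = 1600 fixed eval samples
    assert len(idx) == 16 and all(len(v) == 100 for v in idx.values())
]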
+[2025-09-06 00:35:27] [Rank 0] PRINT: step:9500/10000 train_loss:2.1830 val_loss:2.1705 train_time:389495ms step_avg:41.00ms
+[2025-09-06 00:35:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:35:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:36:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:36:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:36:48] [Rank 0] Total Loss: 4.5060
+[2025-09-06 00:36:48] [Rank 0] Total FTA (Unweighted): 0.3094
+[2025-09-06 00:36:48] [Rank 0] Total FTA (Weighted): 0.3094
+[2025-09-06 00:36:48] [Rank 0] Group 0 Loss: 3.2814
+[2025-09-06 00:36:48] [Rank 0] Group 1 Loss: 3.1378
+[2025-09-06 00:36:48] [Rank 0] Group 2 Loss: 3.1690
+[2025-09-06 00:36:48] [Rank 0] Group 3 Loss: 3.5378
+[2025-09-06 00:36:48] [Rank 0] Group 4 Loss: 3.8596
+[2025-09-06 00:36:48] [Rank 0] Group 5 Loss: 4.2807
+[2025-09-06 00:36:48] [Rank 0] Group 6 Loss: 4.5944
+[2025-09-06 00:36:48] [Rank 0] Group 7 Loss: 4.7510
+[2025-09-06 00:36:48] [Rank 0] Group 8 Loss: 5.0436
+[2025-09-06 00:36:48] [Rank 0] Group 9 Loss: 5.1574
+[2025-09-06 00:36:48] [Rank 0] Group 10 Loss: 5.2249
+[2025-09-06 00:36:48] [Rank 0] Group 11 Loss: 5.2371
+[2025-09-06 00:36:48] [Rank 0] Group 12 Loss: 5.1871
+[2025-09-06 00:36:48] [Rank 0] Group 13 Loss: 5.2098
+[2025-09-06 00:36:48] [Rank 0] Group 14 Loss: 5.2435
+[2025-09-06 00:36:48] [Rank 0] Group 15 Loss: 5.1812
+[2025-09-06 00:36:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:36:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:36:48] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 00:36:48] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:36:48] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:36:48] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-06 00:36:48] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:36:48] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:36:48] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:36:48] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-06 00:36:48] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 00:36:48] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 00:36:48] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-06 00:36:48] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-06 00:36:48] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 00:36:48] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 00:36:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:36:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:36:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:36:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:36:50] [Rank 0] step:9501/10000 train_time:389506ms step_avg:41.00ms
+[2025-09-06 00:36:51] [Rank 0] step:9521/10000 train_time:390182ms step_avg:40.98ms
+[2025-09-06 00:36:51] [Rank 0] step:9541/10000 train_time:391026ms step_avg:40.98ms
+[2025-09-06 00:36:52] [Rank 0] step:9561/10000 train_time:391763ms step_avg:40.98ms
+[2025-09-06 00:36:53] [Rank 0] step:9581/10000 train_time:392501ms step_avg:40.97ms
+[2025-09-06 00:36:54] [Rank 0] step:9601/10000 train_time:393239ms step_avg:40.96ms
+[2025-09-06 00:36:54] [Rank 0] step:9621/10000 train_time:393978ms step_avg:40.95ms
+[2025-09-06 00:36:55] [Rank 0] step:9641/10000 train_time:394717ms step_avg:40.94ms
+[2025-09-06 00:36:56] [Rank 0] step:9661/10000 train_time:395729ms step_avg:40.96ms
+[2025-09-06 00:36:57] [Rank 0] step:9681/10000 train_time:396468ms step_avg:40.95ms
+[2025-09-06 00:36:58] [Rank 0] step:9701/10000 train_time:397206ms step_avg:40.94ms
+[2025-09-06 00:36:58] [Rank 0] step:9721/10000 train_time:397945ms step_avg:40.94ms
+[2025-09-06 00:36:59] [Rank 0] step:9741/10000 train_time:398684ms step_avg:40.93ms
+[2025-09-06 00:37:00] [Rank 0] step:9761/10000 train_time:399423ms step_avg:40.92ms
+[2025-09-06 00:37:01] [Rank 0] step:9781/10000 train_time:400161ms step_avg:40.91ms
+[2025-09-06 00:37:01] [Rank 0] step:9801/10000 train_time:400900ms step_avg:40.90ms
+[2025-09-06 00:37:02] [Rank 0] step:9821/10000 train_time:401638ms step_avg:40.90ms
+[2025-09-06 00:37:03] [Rank 0] step:9841/10000 train_time:402381ms step_avg:40.89ms
+[2025-09-06 00:37:03] [Rank 0] step:9861/10000 train_time:403121ms step_avg:40.88ms
+[2025-09-06 00:37:04] [Rank 0] step:9881/10000 train_time:403860ms step_avg:40.87ms
+[2025-09-06 00:37:05] [Rank 0] step:9901/10000 train_time:404597ms step_avg:40.86ms
+[2025-09-06 00:37:06] [Rank 0] step:9921/10000 train_time:405336ms step_avg:40.86ms
+[2025-09-06 00:37:06] [Rank 0] step:9941/10000 train_time:406075ms step_avg:40.85ms
+[2025-09-06 00:37:07] [Rank 0] step:9961/10000 train_time:406814ms step_avg:40.84ms
+[2025-09-06 00:37:08] [Rank 0] step:9981/10000 train_time:407553ms step_avg:40.83ms
+[2025-09-06 00:37:09] [Rank 0] step:10000/10000 train_time:408255ms step_avg:40.83ms
+[2025-09-06 00:37:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:37:09] [Rank 0] PRINT: step:10000/10000 train_loss:2.1743 val_loss:2.1625 train_time:408380ms step_avg:40.84ms
+[2025-09-06 00:37:09] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:37:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:38:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:38:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:38:30] [Rank 0] Total Loss: 4.5142
+[2025-09-06 00:38:30] [Rank 0] Total FTA (Unweighted): 0.3094
+[2025-09-06 00:38:30] [Rank 0] Total FTA (Weighted): 0.3094
+[2025-09-06 00:38:30] [Rank 0] Group 0 Loss: 3.2942
+[2025-09-06 00:38:30] [Rank 0] Group 1 Loss: 3.1399
+[2025-09-06 00:38:30] [Rank 0] Group 2 Loss: 3.1869
+[2025-09-06 00:38:30] [Rank 0] Group 3 Loss: 3.5586
+[2025-09-06 00:38:30] [Rank 0] Group 4 Loss: 3.8549
+[2025-09-06 00:38:30] [Rank 0] Group 5 Loss: 4.2966
+[2025-09-06 00:38:30] [Rank 0] Group 6 Loss: 4.6145
+[2025-09-06 00:38:30] [Rank 0] Group 7 Loss: 4.7604
+[2025-09-06 00:38:30] [Rank 0] Group 8 Loss: 5.0555
+[2025-09-06 00:38:30] [Rank 0] Group 9 Loss: 5.1534
+[2025-09-06 00:38:30] [Rank 0] Group 10 Loss: 5.2219
+[2025-09-06 00:38:30] [Rank 0] Group 11 Loss: 5.2506
+[2025-09-06 00:38:30] [Rank 0] Group 12 Loss: 5.1841
+[2025-09-06 00:38:30] [Rank 0] Group 13 Loss: 5.2210
+[2025-09-06 00:38:30] [Rank 0] Group 14 Loss: 5.2497
+[2025-09-06 00:38:30] [Rank 0] Group 15 Loss: 5.1847
+[2025-09-06 00:38:30] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:38:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:38:30] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 00:38:30] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:38:30] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 00:38:30] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-06 00:38:30] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 00:38:30] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 00:38:30] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:38:30] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-06 00:38:30] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 00:38:30] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-06 00:38:30] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-06 00:38:30] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-06 00:38:30] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-06 00:38:30] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 00:38:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_loss_curves.png
+[2025-09-06 00:38:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/per_class_acc_curves.png
+[2025-09-06 00:38:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_loss_curve.png
+[2025-09-06 00:38:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_43/total_acc_curve.png
+[2025-09-06 00:38:31] [Rank 0] step:10001/10000 train_time:408389ms step_avg:40.83ms
+[2025-09-06 00:38:31] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 00:38:31 2025 ---
+[2025-09-06 00:38:31] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..851e1153c5c1dcc24c2c03037ebe9ad4b9576472
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/config.json
@@ -0,0 +1,29 @@
+{
+ "cli_args": {
+ "unet": false,
+ "seed": 44, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.08, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "53906568-92c2-4543-b61b-b1e3f3af477c", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 
894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 
425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 
617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 
898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]}
\ No newline at end of file
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..b56299e4ecc3babb46d85ac148c5beccecdad2f8
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb7c145180d22fd06b404d2544b0aedc054448165846c1bef7053a103c8d3450
+size 302759
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..da1ddb4f885a0dc8d7e250719d9c45df8e7729c6
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e81b8e0ca0c823476f49319a3943339dcab6760e465aefe057f8fb853e490a4a
+size 416142
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..4011f80ce8d35b8daf5e5c65e3df7130f87ef4ea
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4912a3d052784931e3662f9cb237d59e765a5f5a44c8bc28159c774851125903
+size 88944
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..b5653299c0dcb82f5c0981bae326bf4f451d3837
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dea78bfa8e2b2e0d76daa9b8e08f9c045c4b407a44a88e9b0855004a0399f8f8
+size 118694
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/training_log_53906568-92c2-4543-b61b-b1e3f3af477c.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/training_log_53906568-92c2-4543-b61b-b1e3f3af477c.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2472deebdc4ad8a2af68542d591758a793d3aa54
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/training_log_53906568-92c2-4543-b61b-b1e3f3af477c.txt
@@ -0,0 +1,5614 @@
+[2025-09-06 00:38:54] [Rank 0] PRINT: --- Script Start: Sat Sep 6 00:38:54 2025 ---
+[2025-09-06 00:38:54] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.08, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-06 00:38:54] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-06 00:38:54] [Rank 0] PRINT: Using fixed seed: 44
+[2025-09-06 00:38:54] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44
+[2025-09-06 00:38:54] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
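# ----------------------------------------------------------------------------
# [Editor's sketch -- not part of the original log] A minimal writer for the
# .bin shard format that _load_data_shard above expects, inferred from its
# asserts: a 256-entry int32 header (magic 20240520, version 1, token count),
# followed by raw uint16 token ids starting at byte offset 256 * 4.
import numpy as np

def write_data_shard(path, token_ids):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520            # magic number the loader checks
    header[1] = 1                   # version the loader checks
    header[2] = len(token_ids)      # number of uint16 tokens that follow
    with open(path, "wb") as f:
        f.write(header.tobytes())                                  # 1024-byte header
        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())  # token payload
# ----------------------------------------------------------------------------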
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
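# ----------------------------------------------------------------------------
# [Editor's note -- sketch, not part of the original log] print0 above appends
# log_message to the logfile twice: once inside `if logfile:` and once more
# unconditionally right after. That double write is why every line in this
# training log appears duplicated. A deduplicated version, using only names
# already defined above:
def print0_dedup(s, console=False):
    if master_process:
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        log_message = f"[{timestamp}] [Rank {rank}] {s}"
        if console or s.startswith("PRINT:"):
            print(s[6:] if s.startswith("PRINT:") else s)  # console copy drops the timestamp
        if logfile:
            with open(logfile, "a") as f:
                f.write(log_message + "\n")                # single write to disk
# ----------------------------------------------------------------------------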
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
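# ----------------------------------------------------------------------------
# [Editor's sketch] What generate_powerlaw_selection_counts above produces for
# a small m (m=3 here; these runs use m=15): group g >= 1 holds 2**(g-1)
# classes at 2**(m-g) samples each, so every group past 0 contributes the same
# 2**(m-1) samples in total -- a power law over per-class frequency.
counts, groups = generate_powerlaw_selection_counts(3)
assert counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
assert groups == [0, 1, 2, 2, 3, 3, 3, 3]
# ----------------------------------------------------------------------------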
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
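# ----------------------------------------------------------------------------
# [Editor's sketch] The first-token-accuracy (FTA) check above, on a toy item
# (illustrative text, not from the dataset): the regex splits the prompt from
# the answer, and a prediction counts as correct when the argmax logit at the
# last prompt position equals the first token of " " + answer.
example = "Q: What is the capital of France? Answer: Paris"
m = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', example, re.IGNORECASE)
prompt, answer = m.group(1).strip(), m.group(2).strip()
# prompt == "Q: What is the capital of France?", answer == "Paris"
# expected first token: tokenizer.encode(" " + answer, add_special_tokens=False)[0]
# ----------------------------------------------------------------------------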
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
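# ----------------------------------------------------------------------------
# [Editor's sketch] Why total_acc_weighted and total_acc_unweighted above can
# disagree (made-up counts): the sample-weighted total follows the largest
# group, while the unweighted mean gives every group one vote, so the
# power-law tail drags the unweighted number down much sooner.
correct = {"0": 90, "1": 1}
totals  = {"0": 100, "1": 10}
weighted   = sum(correct.values()) / sum(totals.values())   # 91/110 ~= 0.83
unweighted = (90 / 100 + 1 / 10) / 2                        # 0.50
# ----------------------------------------------------------------------------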
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
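# ----------------------------------------------------------------------------
# [Editor's sketch] Arithmetic behind the stratified sampler above, with
# made-up sizes: 1000 class-A items + 100 class-B items, num_samples=550.
sample_ratio = 550 / 1100                   # 0.5
kept_a = max(1, int(1000 * sample_ratio))   # 500
kept_b = max(1, int(100 * sample_ratio))    # 50 -- proportions preserved,
# and max(1, ...) guarantees even very rare classes keep at least one item.
# ----------------------------------------------------------------------------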
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
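# ----------------------------------------------------------------------------
# [Editor's sketch -- illustrative, not in the log] A cheap invariant check
# one could run after the setup above: no matrix may be owned by both
# optimizers. Uses only names defined above; optimizer2 is None whenever Muon
# received no parameters (including the SGD-only mode 9, where optimizer1 is
# the SGD instance and the check passes trivially).
muon_ids = {id(p) for p in flat_unique_muon_params} if optimizer2 is not None else set()
opt1_ids = {id(p) for g in optimizer1.param_groups for p in g["params"]}
assert not (muon_ids & opt1_ids), "a parameter is assigned to both optimizers"
# ----------------------------------------------------------------------------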
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
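# ----------------------------------------------------------------------------
# [Editor's summary sketch] The Muon/Adam split implemented by the mode chains
# above (identical for the qkvo and gated branches; mode 9 bypasses both in
# favor of plain SGD+momentum). Note two log-string typos in those branches:
# the mode-13 branch prints "Mode 32" and the mode-16 branch prints "Mode 15".
MODE_SPLITS = {
    0:  ("Muon: all attn + all MLP",  "Adam: embed/head/scalars only"),
    1:  ("Muon: QK attn",             "Adam: VO attn + MLP"),
    2:  ("Muon: VO attn",             "Adam: QK attn + MLP"),
    3:  ("Muon: all attn (QKVO)",     "Adam: MLP"),
    4:  ("Muon: MLP",                 "Adam: all attn (QKVO)"),
    5:  ("Muon: none",                "Adam: all attn + all MLP"),
    6:  ("Muon: W_2 MLP",             "Adam: all attn + W_1 MLP"),
    7:  ("Muon: VO attn + MLP",       "Adam: QK attn"),
    8:  ("Muon: VO attn + W_2 MLP",   "Adam: QK attn + W_1 MLP"),
    9:  ("SGD(momentum=0.9) on all",  "no Adam, no Muon"),
    10: ("Muon: O attn + MLP",        "Adam: V attn + QK attn"),
    13: ("Muon: O attn + W_2 MLP",    "Adam: QK attn + V attn + W_1 MLP"),
    14: ("Muon: O attn",              "Adam: QK attn + V attn + MLP"),
    15: ("Muon: V attn",              "Adam: QK attn + O attn + MLP"),
    16: ("Muon: QKV attn",            "Adam: O attn + MLP"),
}
# ----------------------------------------------------------------------------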
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
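# ----------------------------------------------------------------------------
# [Editor's sketch -- not part of the original log] Shape of the fixed eval
# set built above: group id (as a string) -> at most per_group_k line numbers
# into the QA jsonl; the giant fixed_eval_indices.json earlier in this diff
# is one such file. Equal per-group sampling keeps the power-law head from
# swamping the tail groups, and persisting the indices removes eval-set
# jitter across checkpoints of the same run. Illustrative call:
demo_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, per_group_k=2, seed=0)
# e.g. {"0": [12, 845], "1": [3, 97], ...}   (line numbers here are made up)
# ----------------------------------------------------------------------------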
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
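# ----------------------------------------------------------------------------
# [Editor's sketch] Concrete values of the schedules applied in the training
# section below, with num_iterations=10000 and cooldown_frac=0.8 from the
# hyperparameters above: the LR multiplier stays at 1.0 for the first 20% of
# training, then decays linearly to 0.1; Muon's momentum (when that optimizer
# is active) warms from 0.85 to 0.95 over the first 300 steps.
lr_checkpoints = {s: round(get_lr(s), 3) for s in (0, 2000, 6000, 10000)}
# == {0: 1.0, 2000: 1.0, 6000: 0.55, 10000: 0.1}
momentum_at = lambda step: (1 - min(step / 300, 1)) * 0.85 + min(step / 300, 1) * 0.95
# momentum_at(0) == 0.85, momentum_at(150) == 0.90, momentum_at(300) == 0.95
# ----------------------------------------------------------------------------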
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-06 00:38:54] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle over the shards so the generator supports multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: SGD+Momentum on all params; "
+                         "10: Muon(O Attn, MLP)/Adam(QK Attn, V Attn); "
+                         "13: Muon(W_O, W_2 MLP)/Adam(QK Attn, V Attn, W_1 MLP); "
+                         "14: Muon(W_O)/Adam(QK Attn, V Attn, MLP); "
+                         "15: Muon(W_V)/Adam(QK Attn, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 491520
+    train_seq_len = 3*1024
+    val_seq_len = 4*4*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
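+# Back-of-envelope data budget implied by the Hyperparameters above (a sketch;
+# the actual world_size is set by the launcher and is not recorded in this log):
+#   tokens per training step = world_size * train_seq_len  (see train_loader below)
+#     e.g. world_size=1 -> 3,072 tokens/step, ~30.7M tokens over 10,000 iterations
+#   tokens per validation pass = val_tokens = 491,520, consumed in
+#     val_tokens // (world_size * val_seq_len) steps, e.g. 491,520 // 16,384 = 30
+#     steps at world_size=1; when the division is not exact, the validation loop
+#     below logs a warning and drops the remainder.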
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
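+    # Sketch of what the loop below measures (the QA line here is a made-up
+    # example, not taken from the dataset):
+    #   Loss: targets are next-token ids, with pad positions set to -100 so that
+    #         F.cross_entropy(..., ignore_index=-100) skips them.
+    #   FTA (first-token accuracy): for an item like
+    #       {"text": "What is the capital of France? Answer: Paris", "class_id": 7}
+    #   the regex splits the prompt ("...France?") from the answer ("Paris"); the
+    #   argmax over logits at the last prompt position counts as correct iff it
+    #   equals the first BPE token of " Paris" (leading space included, since
+    #   GPT-2 tokenizes " Paris" and "Paris" differently).
+
+    # 3. 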
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
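+        # Optional sanity-check sketch (added for illustration, not required for
+        # training): whatever mode was selected above, no matrix should end up
+        # routed to both optimizers.
+        _muon_ids = {id(p) for p in muon_params_target_list}
+        _adam_ids = {id(p) for p in adam_matrix_target_list}
+        assert _muon_ids.isdisjoint(_adam_ids), "a parameter was assigned to both Muon and Adam"
+
+    print0(f"PRINT: Optimizers configured. 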
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; use exp_args.muon_lr directly (this branch defines no local muon_lr)
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
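+        # Note on the gated parameterization (an assumption; models/nano_GPT_gated.py
+        # itself is not included in this log): the MLP here has three matrices,
+        # grouped with the gate and up projections (c_fc, c_up) as W_1 and the down
+        # projection (c_proj) as W_2, matching a gated-MLP forward of the form
+        #     y = (act(x @ W_fc.T) * (x @ W_up.T)) @ W_proj.T
+        # so the "W_2" modes (6, 8, 13) hand only c_proj from the MLP to Muon,
+        # while c_fc and c_up travel with the W_1 group to Adam.
+
+    print0(f"PRINT: Optimizers configured. 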
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
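Editor's note: run_detailed_evaluation is defined earlier in the script and is not reproduced in this excerpt. As a rough sketch of what a first-token-accuracy (FTA) metric of this kind typically computes — an assumption about its internals, not the script's actual code — the idea is to prompt with everything up to "Answer:" and check whether the model's greedy next token matches the first token of the gold answer:

import re
import torch

QA_RE = re.compile(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', re.IGNORECASE)

def first_token_accuracy_sketch(model, tokenizer, qa_texts, device):
    # Hypothetical helper: assumes model(input_ids) returns logits of shape
    # (batch, seq, vocab); the real model in this script takes (inputs, targets, window).
    hits, total = 0, 0
    for text in qa_texts:
        m = QA_RE.search(text)
        if m is None:
            continue  # same filter as _is_valid_qa_text_for_fta above
        question, answer = m.group(1), m.group(2)
        prompt = tokenizer.encode(question + " Answer:", return_tensors="pt").to(device)
        gold_first = tokenizer.encode(" " + answer)[0]
        with torch.no_grad():
            logits = model(prompt)
        hits += int(logits[0, -1].argmax().item() == gold_first)
        total += 1
    return hits / max(total, 1)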
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
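Editor's note: get_lr is defined earlier in the script and not shown in this excerpt. Given cooldown_frac = 0.8 in the config, a schedule of the usual constant-then-linear-cooldown form would behave like the sketch below (an assumption about get_lr's shape, not its verbatim code); the momentum ramp is copied from the training section above:

def get_lr_sketch(step, num_iterations=10_000, cooldown_frac=0.8):
    # Assumed multiplier applied to each param group's initial_lr:
    # flat for the first 20% of training, then linear decay to 0.
    x = step / num_iterations
    return 1.0 if x < 1 - cooldown_frac else (1 - x) / cooldown_frac

def momentum_at(step):
    # Verbatim ramp from the training loop: 0.85 -> 0.95 over the first 300 steps.
    frac = min(step / 300, 1)
    return (1 - frac) * 0.85 + frac * 0.95

print(get_lr_sketch(0), get_lr_sketch(2_000), get_lr_sketch(10_000))  # -> 1.0 1.0 0.0
print(momentum_at(0), momentum_at(150), momentum_at(300))             # -> approx. 0.85 0.90 0.95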
+[2025-09-06 00:38:54] [Rank 0] PRINT: Constructing model...
+[2025-09-06 00:38:56] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-06 00:38:56] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-06 00:38:56] [Rank 0] PRINT: Testing model forward function:
+[2025-09-06 00:38:59] [Rank 0] PRINT: Model test - Result type:
+[2025-09-06 00:38:59] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-06 00:39:00] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-06 00:39:00] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-06 00:39:00] [Rank 0] PRINT: Model returns:
+[2025-09-06 00:39:00] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-06 00:39:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-06 00:39:00] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.08).
+[2025-09-06 00:39:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-06 00:39:00] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-06 00:39:04] [Rank 0] PRINT: Model compilation complete.
+[2025-09-06 00:39:04] [Rank 0] PRINT: Starting warmup...
+[2025-09-06 00:39:41] [Rank 0] PRINT: Warmup complete.
+[2025-09-06 00:39:41] [Rank 0] PRINT: Starting training...
+[2025-09-06 00:39:48] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/fixed_eval_indices.json
+[2025-09-06 00:39:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:39:51] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-06 00:40:23] [Rank 0] step:21/10000 train_time:31945ms step_avg:1521.21ms
+[2025-09-06 00:40:24] [Rank 0] step:41/10000 train_time:32675ms step_avg:796.94ms
+[2025-09-06 00:40:25] [Rank 0] step:61/10000 train_time:33402ms step_avg:547.57ms
+[2025-09-06 00:40:25] [Rank 0] step:81/10000 train_time:34130ms step_avg:421.36ms
+[2025-09-06 00:40:26] [Rank 0] step:101/10000 train_time:34858ms step_avg:345.13ms
+[2025-09-06 00:40:27] [Rank 0] step:121/10000 train_time:35586ms step_avg:294.10ms
+[2025-09-06 00:40:28] [Rank 0] step:141/10000 train_time:36313ms step_avg:257.54ms
+[2025-09-06 00:40:28] [Rank 0] step:161/10000 train_time:37040ms step_avg:230.06ms
+[2025-09-06 00:40:29] [Rank 0] step:181/10000 train_time:37768ms step_avg:208.66ms
+[2025-09-06 00:40:30] [Rank 0] step:201/10000 train_time:38495ms step_avg:191.52ms
+[2025-09-06 00:40:31] [Rank 0] step:221/10000 train_time:39223ms step_avg:177.48ms
+[2025-09-06 00:40:31] [Rank 0] step:241/10000 train_time:39950ms step_avg:165.77ms
+[2025-09-06 00:40:32] [Rank 0] step:261/10000 train_time:40677ms step_avg:155.85ms
+[2025-09-06 00:40:33] [Rank 0] step:281/10000 train_time:41403ms step_avg:147.34ms
+[2025-09-06 00:40:33] [Rank 0] step:301/10000 train_time:42132ms step_avg:139.97ms
+[2025-09-06 00:40:34] [Rank 0] step:321/10000 train_time:42859ms step_avg:133.52ms
+[2025-09-06 00:40:35] [Rank 0] step:341/10000 train_time:43587ms step_avg:127.82ms
+[2025-09-06 00:40:36] [Rank 0] step:361/10000 train_time:44314ms step_avg:122.75ms
+[2025-09-06 00:40:36] [Rank 0] step:381/10000 train_time:45042ms step_avg:118.22ms
+[2025-09-06 00:40:37] [Rank 0] step:401/10000 train_time:45770ms step_avg:114.14ms
+[2025-09-06 00:40:38] [Rank 0] step:421/10000 train_time:46498ms step_avg:110.45ms
+[2025-09-06 00:40:39] [Rank 0] step:441/10000 train_time:47226ms step_avg:107.09ms
+[2025-09-06 00:40:39] [Rank 0] step:461/10000 train_time:47954ms step_avg:104.02ms
+[2025-09-06 00:40:40] [Rank 0] step:481/10000 train_time:48681ms step_avg:101.21ms
+[2025-09-06 00:40:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:40:41] [Rank 0] PRINT: step:500/10000 train_loss:5.9604 val_loss:4.3054 train_time:49489ms step_avg:98.98ms
+[2025-09-06 00:40:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:40:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:42:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:42:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:42:02] [Rank 0] Total Loss: 6.0345
+[2025-09-06 00:42:02] [Rank 0] Total FTA (Unweighted): 0.0813
+[2025-09-06 00:42:02] [Rank 0] Total FTA (Weighted): 0.0813
+[2025-09-06 00:42:02] [Rank 0] Group 0 Loss: 3.7722
+[2025-09-06 00:42:02] [Rank 0] Group 1 Loss: 3.9104
+[2025-09-06 00:42:02] [Rank 0] Group 2 Loss: 4.8292
+[2025-09-06 00:42:02] [Rank 0] Group 3 Loss: 5.5469
+[2025-09-06 00:42:02] [Rank 0] Group 4 Loss: 6.2491
+[2025-09-06 00:42:02] [Rank 0] Group 5 Loss: 6.3840
+[2025-09-06 00:42:02] [Rank 0] Group 6 Loss: 6.4648
+[2025-09-06 00:42:02] [Rank 0] Group 7 Loss: 6.4141
+[2025-09-06 00:42:02] [Rank 0] Group 8 Loss: 6.5582
+[2025-09-06 00:42:02] [Rank 0] Group 9 Loss: 6.7013
+[2025-09-06 00:42:02] [Rank 0] Group 10 Loss: 6.6792
+[2025-09-06 00:42:02] [Rank 0] Group 11 Loss: 6.7433
+[2025-09-06 00:42:02] [Rank 0] Group 12 Loss: 6.5503
+[2025-09-06 00:42:02] [Rank 0] Group 13 Loss: 6.5325
+[2025-09-06 00:42:02] [Rank 0] Group 14 Loss: 6.6645
+[2025-09-06 00:42:02] [Rank 0] Group 15 Loss: 6.5513
+[2025-09-06 00:42:02] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 00:42:02] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:42:02] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-06 00:42:02] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-06 00:42:02] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-06 00:42:02] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-06 00:42:02] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 00:42:02] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-06 00:42:02] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-06 00:42:02] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-06 00:42:02] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-06 00:42:02] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-06 00:42:02] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:42:02] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-06 00:42:02] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 00:42:02] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-06 00:42:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:42:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:42:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:42:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:42:04] [Rank 0] step:501/10000 train_time:49498ms step_avg:98.80ms
+[2025-09-06 00:42:05] [Rank 0] step:521/10000 train_time:50297ms step_avg:96.54ms
+[2025-09-06 00:42:06] [Rank 0] step:541/10000 train_time:51024ms step_avg:94.31ms
+[2025-09-06 00:42:06] [Rank 0] step:561/10000 train_time:51752ms step_avg:92.25ms
+[2025-09-06 00:42:07] [Rank 0] step:581/10000 train_time:52617ms step_avg:90.56ms
+[2025-09-06 00:42:08] [Rank 0] step:601/10000 train_time:53345ms step_avg:88.76ms
+[2025-09-06 00:42:09] [Rank 0] step:621/10000 train_time:54072ms step_avg:87.07ms
+[2025-09-06 00:42:09] [Rank 0] step:641/10000 train_time:54799ms step_avg:85.49ms
+[2025-09-06 00:42:10] [Rank 0] step:661/10000 train_time:55527ms step_avg:84.00ms
+[2025-09-06 00:42:11] [Rank 0] step:681/10000 train_time:56253ms step_avg:82.60ms
+[2025-09-06 00:42:12] [Rank 0] step:701/10000 train_time:56980ms step_avg:81.28ms
+[2025-09-06 00:42:12] [Rank 0] step:721/10000 train_time:57707ms step_avg:80.04ms
+[2025-09-06 00:42:13] [Rank 0] step:741/10000 train_time:58435ms step_avg:78.86ms
+[2025-09-06 00:42:14] [Rank 0] step:761/10000 train_time:59167ms step_avg:77.75ms
+[2025-09-06 00:42:14] [Rank 0] step:781/10000 train_time:59898ms step_avg:76.69ms
+[2025-09-06 00:42:15] [Rank 0] step:801/10000 train_time:60630ms step_avg:75.69ms
+[2025-09-06 00:42:16] [Rank 0] step:821/10000 train_time:61969ms step_avg:75.48ms
+[2025-09-06 00:42:17] [Rank 0] step:841/10000 train_time:62701ms step_avg:74.56ms
+[2025-09-06 00:42:18] [Rank 0] step:861/10000 train_time:63434ms step_avg:73.67ms
+[2025-09-06 00:42:19] [Rank 0] step:881/10000 train_time:64166ms step_avg:72.83ms
+[2025-09-06 00:42:19] [Rank 0] step:901/10000 train_time:64898ms step_avg:72.03ms
+[2025-09-06 00:42:20] [Rank 0] step:921/10000 train_time:65632ms step_avg:71.26ms
+[2025-09-06 00:42:21] [Rank 0] step:941/10000 train_time:66364ms step_avg:70.53ms
+[2025-09-06 00:42:22] [Rank 0] step:961/10000 train_time:67097ms step_avg:69.82ms
+[2025-09-06 00:42:22] [Rank 0] step:981/10000 train_time:67829ms step_avg:69.14ms
+[2025-09-06 00:42:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:42:24] [Rank 0] PRINT: step:1000/10000 train_loss:3.8824 val_loss:3.5385 train_time:68642ms step_avg:68.64ms
+[2025-09-06 00:42:24] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:42:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:43:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:43:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:43:44] [Rank 0] Total Loss: 5.4986
+[2025-09-06 00:43:44] [Rank 0] Total FTA (Unweighted): 0.1225
+[2025-09-06 00:43:44] [Rank 0] Total FTA (Weighted): 0.1225
+[2025-09-06 00:43:44] [Rank 0] Group 0 Loss: 3.3205
+[2025-09-06 00:43:44] [Rank 0] Group 1 Loss: 3.2534
+[2025-09-06 00:43:44] [Rank 0] Group 2 Loss: 3.7993
+[2025-09-06 00:43:44] [Rank 0] Group 3 Loss: 4.5548
+[2025-09-06 00:43:44] [Rank 0] Group 4 Loss: 5.4729
+[2025-09-06 00:43:44] [Rank 0] Group 5 Loss: 5.7841
+[2025-09-06 00:43:44] [Rank 0] Group 6 Loss: 5.9227
+[2025-09-06 00:43:44] [Rank 0] Group 7 Loss: 5.9692
+[2025-09-06 00:43:44] [Rank 0] Group 8 Loss: 6.1354
+[2025-09-06 00:43:44] [Rank 0] Group 9 Loss: 6.3195
+[2025-09-06 00:43:44] [Rank 0] Group 10 Loss: 6.2719
+[2025-09-06 00:43:44] [Rank 0] Group 11 Loss: 6.3379
+[2025-09-06 00:43:44] [Rank 0] Group 12 Loss: 6.1917
+[2025-09-06 00:43:44] [Rank 0] Group 13 Loss: 6.1716
+[2025-09-06 00:43:44] [Rank 0] Group 14 Loss: 6.2745
+[2025-09-06 00:43:44] [Rank 0] Group 15 Loss: 6.1980
+[2025-09-06 00:43:44] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-06 00:43:44] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:43:44] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:43:44] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 00:43:44] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 00:43:44] [Rank 0] Group 5 FTA: 0.1100
+[2025-09-06 00:43:44] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-06 00:43:44] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 00:43:44] [Rank 0] Group 8 FTA: 0.1300
+[2025-09-06 00:43:44] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-06 00:43:44] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-06 00:43:44] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 00:43:44] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:43:44] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 00:43:44] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:43:44] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 00:43:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:43:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:43:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:43:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:43:46] [Rank 0] step:1001/10000 train_time:68652ms step_avg:68.58ms
+[2025-09-06 00:43:47] [Rank 0] step:1021/10000 train_time:69330ms step_avg:67.90ms
+[2025-09-06 00:43:47] [Rank 0] step:1041/10000 train_time:70063ms step_avg:67.30ms
+[2025-09-06 00:43:48] [Rank 0] step:1061/10000 train_time:70796ms step_avg:66.73ms
+[2025-09-06 00:43:49] [Rank 0] step:1081/10000 train_time:71528ms step_avg:66.17ms
+[2025-09-06 00:43:50] [Rank 0] step:1101/10000 train_time:72259ms step_avg:65.63ms
+[2025-09-06 00:43:50] [Rank 0] step:1121/10000 train_time:72991ms step_avg:65.11ms
+[2025-09-06 00:43:51] [Rank 0] step:1141/10000 train_time:73724ms step_avg:64.61ms
+[2025-09-06 00:43:52] [Rank 0] step:1161/10000 train_time:74457ms step_avg:64.13ms
+[2025-09-06 00:43:53] [Rank 0] step:1181/10000 train_time:75189ms step_avg:63.67ms
+[2025-09-06 00:43:53] [Rank 0] step:1201/10000 train_time:75922ms step_avg:63.22ms
+[2025-09-06 00:43:54] [Rank 0] step:1221/10000 train_time:76653ms step_avg:62.78ms
+[2025-09-06 00:43:55] [Rank 0] step:1241/10000 train_time:77387ms step_avg:62.36ms
+[2025-09-06 00:43:56] [Rank 0] step:1261/10000 train_time:78119ms step_avg:61.95ms
+[2025-09-06 00:43:56] [Rank 0] step:1281/10000 train_time:78851ms step_avg:61.55ms
+[2025-09-06 00:43:57] [Rank 0] step:1301/10000 train_time:79584ms step_avg:61.17ms
+[2025-09-06 00:43:58] [Rank 0] step:1321/10000 train_time:80316ms step_avg:60.80ms
+[2025-09-06 00:43:58] [Rank 0] step:1341/10000 train_time:81048ms step_avg:60.44ms
+[2025-09-06 00:43:59] [Rank 0] step:1361/10000 train_time:81781ms step_avg:60.09ms
+[2025-09-06 00:44:00] [Rank 0] step:1381/10000 train_time:82513ms step_avg:59.75ms
+[2025-09-06 00:44:01] [Rank 0] step:1401/10000 train_time:83246ms step_avg:59.42ms
+[2025-09-06 00:44:01] [Rank 0] step:1421/10000 train_time:83979ms step_avg:59.10ms
+[2025-09-06 00:44:02] [Rank 0] step:1441/10000 train_time:84712ms step_avg:58.79ms
+[2025-09-06 00:44:03] [Rank 0] step:1461/10000 train_time:85446ms step_avg:58.48ms
+[2025-09-06 00:44:04] [Rank 0] step:1481/10000 train_time:86178ms step_avg:58.19ms
+[2025-09-06 00:44:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:44:05] [Rank 0] PRINT: step:1500/10000 train_loss:3.3480 val_loss:3.1730 train_time:86989ms step_avg:57.99ms
+[2025-09-06 00:44:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:44:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:45:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:45:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:45:25] [Rank 0] Total Loss: 5.2410
+[2025-09-06 00:45:25] [Rank 0] Total FTA (Unweighted): 0.1281
+[2025-09-06 00:45:25] [Rank 0] Total FTA (Weighted): 0.1281
+[2025-09-06 00:45:25] [Rank 0] Group 0 Loss: 3.1690
+[2025-09-06 00:45:25] [Rank 0] Group 1 Loss: 3.1600
+[2025-09-06 00:45:25] [Rank 0] Group 2 Loss: 3.5638
+[2025-09-06 00:45:25] [Rank 0] Group 3 Loss: 4.1177
+[2025-09-06 00:45:25] [Rank 0] Group 4 Loss: 4.9820
+[2025-09-06 00:45:25] [Rank 0] Group 5 Loss: 5.3965
+[2025-09-06 00:45:25] [Rank 0] Group 6 Loss: 5.6402
+[2025-09-06 00:45:25] [Rank 0] Group 7 Loss: 5.7172
+[2025-09-06 00:45:25] [Rank 0] Group 8 Loss: 5.9109
+[2025-09-06 00:45:25] [Rank 0] Group 9 Loss: 6.0607
+[2025-09-06 00:45:25] [Rank 0] Group 10 Loss: 6.0517
+[2025-09-06 00:45:25] [Rank 0] Group 11 Loss: 6.1157
+[2025-09-06 00:45:25] [Rank 0] Group 12 Loss: 5.9613
+[2025-09-06 00:45:25] [Rank 0] Group 13 Loss: 5.9764
+[2025-09-06 00:45:25] [Rank 0] Group 14 Loss: 6.0493
+[2025-09-06 00:45:25] [Rank 0] Group 15 Loss: 5.9833
+[2025-09-06 00:45:25] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-06 00:45:25] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:45:25] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:45:25] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 00:45:25] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 00:45:25] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-06 00:45:25] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-06 00:45:25] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 00:45:25] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-06 00:45:25] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 00:45:25] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-06 00:45:25] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 00:45:25] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 00:45:25] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 00:45:25] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:45:25] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:45:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:45:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:45:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:45:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:45:27] [Rank 0] step:1501/10000 train_time:86999ms step_avg:57.96ms
+[2025-09-06 00:45:28] [Rank 0] step:1521/10000 train_time:87662ms step_avg:57.63ms
+[2025-09-06 00:45:28] [Rank 0] step:1541/10000 train_time:88394ms step_avg:57.36ms
+[2025-09-06 00:45:29] [Rank 0] step:1561/10000 train_time:89127ms step_avg:57.10ms
+[2025-09-06 00:45:30] [Rank 0] step:1581/10000 train_time:89860ms step_avg:56.84ms
+[2025-09-06 00:45:30] [Rank 0] step:1601/10000 train_time:90599ms step_avg:56.59ms
+[2025-09-06 00:45:31] [Rank 0] step:1621/10000 train_time:91331ms step_avg:56.34ms
+[2025-09-06 00:45:33] [Rank 0] step:1641/10000 train_time:92665ms step_avg:56.47ms
+[2025-09-06 00:45:33] [Rank 0] step:1661/10000 train_time:93397ms step_avg:56.23ms
+[2025-09-06 00:45:34] [Rank 0] step:1681/10000 train_time:94130ms step_avg:56.00ms
+[2025-09-06 00:45:35] [Rank 0] step:1701/10000 train_time:94863ms step_avg:55.77ms
+[2025-09-06 00:45:35] [Rank 0] step:1721/10000 train_time:95596ms step_avg:55.55ms
+[2025-09-06 00:45:36] [Rank 0] step:1741/10000 train_time:96328ms step_avg:55.33ms
+[2025-09-06 00:45:37] [Rank 0] step:1761/10000 train_time:97060ms step_avg:55.12ms
+[2025-09-06 00:45:38] [Rank 0] step:1781/10000 train_time:97792ms step_avg:54.91ms
+[2025-09-06 00:45:38] [Rank 0] step:1801/10000 train_time:98524ms step_avg:54.71ms
+[2025-09-06 00:45:39] [Rank 0] step:1821/10000 train_time:99256ms step_avg:54.51ms
+[2025-09-06 00:45:40] [Rank 0] step:1841/10000 train_time:99987ms step_avg:54.31ms
+[2025-09-06 00:45:41] [Rank 0] step:1861/10000 train_time:100719ms step_avg:54.12ms
+[2025-09-06 00:45:41] [Rank 0] step:1881/10000 train_time:101451ms step_avg:53.93ms
+[2025-09-06 00:45:42] [Rank 0] step:1901/10000 train_time:102183ms step_avg:53.75ms
+[2025-09-06 00:45:43] [Rank 0] step:1921/10000 train_time:102915ms step_avg:53.57ms
+[2025-09-06 00:45:44] [Rank 0] step:1941/10000 train_time:103648ms step_avg:53.40ms
+[2025-09-06 00:45:44] [Rank 0] step:1961/10000 train_time:104381ms step_avg:53.23ms
+[2025-09-06 00:45:45] [Rank 0] step:1981/10000 train_time:105113ms step_avg:53.06ms
+[2025-09-06 00:45:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:45:46] [Rank 0] PRINT: step:2000/10000 train_loss:3.0605 val_loss:2.9531 train_time:105925ms step_avg:52.96ms
+[2025-09-06 00:45:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:45:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:47:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:47:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:47:07] [Rank 0] Total Loss: 5.0607
+[2025-09-06 00:47:07] [Rank 0] Total FTA (Unweighted): 0.1531
+[2025-09-06 00:47:07] [Rank 0] Total FTA (Weighted): 0.1531
+[2025-09-06 00:47:07] [Rank 0] Group 0 Loss: 3.1486
+[2025-09-06 00:47:07] [Rank 0] Group 1 Loss: 3.0573
+[2025-09-06 00:47:07] [Rank 0] Group 2 Loss: 3.4007
+[2025-09-06 00:47:07] [Rank 0] Group 3 Loss: 3.8417
+[2025-09-06 00:47:07] [Rank 0] Group 4 Loss: 4.6760
+[2025-09-06 00:47:07] [Rank 0] Group 5 Loss: 5.1340
+[2025-09-06 00:47:07] [Rank 0] Group 6 Loss: 5.4141
+[2025-09-06 00:47:07] [Rank 0] Group 7 Loss: 5.4954
+[2025-09-06 00:47:07] [Rank 0] Group 8 Loss: 5.7486
+[2025-09-06 00:47:07] [Rank 0] Group 9 Loss: 5.8936
+[2025-09-06 00:47:07] [Rank 0] Group 10 Loss: 5.8696
+[2025-09-06 00:47:07] [Rank 0] Group 11 Loss: 5.9424
+[2025-09-06 00:47:07] [Rank 0] Group 12 Loss: 5.8043
+[2025-09-06 00:47:07] [Rank 0] Group 13 Loss: 5.8283
+[2025-09-06 00:47:07] [Rank 0] Group 14 Loss: 5.8869
+[2025-09-06 00:47:07] [Rank 0] Group 15 Loss: 5.8298
+[2025-09-06 00:47:07] [Rank 0] Group 0 FTA: 0.6200
+[2025-09-06 00:47:07] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 00:47:07] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:47:07] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 00:47:07] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 00:47:07] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 00:47:07] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-06 00:47:07] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 00:47:07] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-06 00:47:07] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 00:47:07] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 00:47:07] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 00:47:07] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:47:07] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 00:47:07] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:47:07] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:47:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:47:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:47:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:47:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:47:09] [Rank 0] step:2001/10000 train_time:105935ms step_avg:52.94ms
+[2025-09-06 00:47:10] [Rank 0] step:2021/10000 train_time:106608ms step_avg:52.75ms
+[2025-09-06 00:47:10] [Rank 0] step:2041/10000 train_time:107340ms step_avg:52.59ms
+[2025-09-06 00:47:11] [Rank 0] step:2061/10000 train_time:108071ms step_avg:52.44ms
+[2025-09-06 00:47:12] [Rank 0] step:2081/10000 train_time:108803ms step_avg:52.28ms
+[2025-09-06 00:47:13] [Rank 0] step:2101/10000 train_time:109535ms step_avg:52.13ms
+[2025-09-06 00:47:13] [Rank 0] step:2121/10000 train_time:110268ms step_avg:51.99ms
+[2025-09-06 00:47:14] [Rank 0] step:2141/10000 train_time:110999ms step_avg:51.84ms
+[2025-09-06 00:47:15] [Rank 0] step:2161/10000 train_time:111731ms step_avg:51.70ms
+[2025-09-06 00:47:15] [Rank 0] step:2181/10000 train_time:112463ms step_avg:51.57ms
+[2025-09-06 00:47:16] [Rank 0] step:2201/10000 train_time:113197ms step_avg:51.43ms
+[2025-09-06 00:47:17] [Rank 0] step:2221/10000 train_time:113929ms step_avg:51.30ms
+[2025-09-06 00:47:18] [Rank 0] step:2241/10000 train_time:114667ms step_avg:51.17ms
+[2025-09-06 00:47:18] [Rank 0] step:2261/10000 train_time:115405ms step_avg:51.04ms
+[2025-09-06 00:47:19] [Rank 0] step:2281/10000 train_time:116144ms step_avg:50.92ms
+[2025-09-06 00:47:20] [Rank 0] step:2301/10000 train_time:116883ms step_avg:50.80ms
+[2025-09-06 00:47:21] [Rank 0] step:2321/10000 train_time:117621ms step_avg:50.68ms
+[2025-09-06 00:47:21] [Rank 0] step:2341/10000 train_time:118481ms step_avg:50.61ms
+[2025-09-06 00:47:22] [Rank 0] step:2361/10000 train_time:119245ms step_avg:50.51ms
+[2025-09-06 00:47:23] [Rank 0] step:2381/10000 train_time:119984ms step_avg:50.39ms
+[2025-09-06 00:47:24] [Rank 0] step:2401/10000 train_time:120860ms step_avg:50.34ms
+[2025-09-06 00:47:25] [Rank 0] step:2421/10000 train_time:121598ms step_avg:50.23ms
+[2025-09-06 00:47:25] [Rank 0] step:2441/10000 train_time:122337ms step_avg:50.12ms
+[2025-09-06 00:47:26] [Rank 0] step:2461/10000 train_time:123077ms step_avg:50.01ms
+[2025-09-06 00:47:27] [Rank 0] step:2481/10000 train_time:123816ms step_avg:49.91ms
00:47:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 00:47:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 00:47:28] [Rank 0] PRINT: step:2500/10000 train_loss:2.8664 val_loss:2.7702 train_time:124635ms step_avg:49.85ms +[2025-09-06 00:47:28] [Rank 0] PRINT: step:2500/10000 train_loss:2.8664 val_loss:2.7702 train_time:124635ms step_avg:49.85ms +[2025-09-06 00:47:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 00:47:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 00:47:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 00:47:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 00:48:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 00:48:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 00:48:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 00:48:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 00:48:49] [Rank 0] Total Loss: 4.9796 +[2025-09-06 00:48:49] [Rank 0] Total Loss: 4.9796 +[2025-09-06 00:48:49] [Rank 0] Total FTA (Unweighted): 0.1700 +[2025-09-06 00:48:49] [Rank 0] Total FTA (Unweighted): 0.1700 +[2025-09-06 00:48:49] [Rank 0] Total FTA (Weighted): 0.1700 +[2025-09-06 00:48:49] [Rank 0] Total FTA (Weighted): 0.1700 +[2025-09-06 00:48:49] [Rank 0] Group 0 Loss: 3.1965 +[2025-09-06 00:48:49] [Rank 0] Group 0 Loss: 3.1965 +[2025-09-06 00:48:49] [Rank 0] Group 1 Loss: 3.0700 +[2025-09-06 00:48:49] [Rank 0] Group 1 Loss: 3.0700 +[2025-09-06 00:48:49] [Rank 0] Group 2 Loss: 3.3826 +[2025-09-06 00:48:49] [Rank 0] Group 2 Loss: 3.3826 +[2025-09-06 00:48:49] [Rank 0] Group 3 Loss: 3.7497 +[2025-09-06 00:48:49] [Rank 0] Group 3 Loss: 3.7497 +[2025-09-06 00:48:49] [Rank 0] Group 4 Loss: 4.4907 +[2025-09-06 00:48:49] [Rank 0] Group 4 Loss: 4.4907 +[2025-09-06 00:48:49] [Rank 0] Group 5 Loss: 5.0208 +[2025-09-06 00:48:49] [Rank 0] Group 5 Loss: 5.0208 +[2025-09-06 00:48:49] [Rank 0] Group 6 Loss: 5.2897 +[2025-09-06 00:48:49] [Rank 0] Group 6 Loss: 5.2897 +[2025-09-06 00:48:49] [Rank 0] Group 7 Loss: 5.3801 +[2025-09-06 00:48:49] [Rank 0] Group 7 Loss: 5.3801 +[2025-09-06 00:48:49] [Rank 0] Group 8 Loss: 5.6260 +[2025-09-06 00:48:49] [Rank 0] Group 8 Loss: 5.6260 +[2025-09-06 00:48:49] [Rank 0] Group 9 Loss: 5.8053 +[2025-09-06 00:48:49] [Rank 0] Group 9 Loss: 5.8053 +[2025-09-06 00:48:49] [Rank 0] Group 10 Loss: 5.8057 +[2025-09-06 00:48:49] [Rank 0] Group 10 Loss: 5.8057 +[2025-09-06 00:48:49] [Rank 0] Group 11 Loss: 5.8333 +[2025-09-06 00:48:49] [Rank 0] Group 11 Loss: 5.8333 +[2025-09-06 00:48:49] [Rank 0] Group 12 Loss: 5.7322 +[2025-09-06 00:48:49] [Rank 0] Group 12 Loss: 5.7322 +[2025-09-06 00:48:49] [Rank 0] Group 13 Loss: 5.7310 +[2025-09-06 00:48:49] [Rank 0] Group 13 Loss: 5.7310 +[2025-09-06 00:48:49] [Rank 0] Group 14 Loss: 5.7957 +[2025-09-06 00:48:49] [Rank 0] Group 14 Loss: 5.7957 +[2025-09-06 00:48:49] [Rank 0] Group 15 Loss: 5.7638 +[2025-09-06 00:48:49] [Rank 0] Group 15 Loss: 5.7638 +[2025-09-06 00:48:49] [Rank 0] Group 0 FTA: 0.8000 +[2025-09-06 00:48:49] [Rank 0] Group 0 FTA: 0.8000 +[2025-09-06 00:48:49] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 00:48:49] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 00:48:49] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 00:48:49] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 00:48:49] 
+[2025-09-06 00:48:49] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 00:48:49] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 00:48:49] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 00:48:49] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 00:48:49] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 00:48:49] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 00:48:49] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 00:48:49] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 00:48:49] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 00:48:49] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:48:49] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-06 00:48:49] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:48:49] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 00:48:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:48:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:48:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:48:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:48:51] [Rank 0] step:2501/10000 train_time:124644ms step_avg:49.84ms
+[2025-09-06 00:48:51] [Rank 0] step:2521/10000 train_time:125315ms step_avg:49.71ms
+[2025-09-06 00:48:52] [Rank 0] step:2541/10000 train_time:126052ms step_avg:49.61ms
+[2025-09-06 00:48:53] [Rank 0] step:2561/10000 train_time:126790ms step_avg:49.51ms
+[2025-09-06 00:48:54] [Rank 0] step:2581/10000 train_time:127530ms step_avg:49.41ms
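The divisibility warning logged before each validation pass is plain integer arithmetic: 491520 / 65536 = 7.5, so a whole-batch loop covers only 7 × 65536 = 458752 tokens and the trailing 32768 are skipped. A minimal sketch of a check that behaves this way (the function name and wording are hypothetical, not the actual training script):

```python
# Sketch of the check behind the repeated val_tokens warning.
# The constants match the logged config; `plan_val_batches` is hypothetical.
def plan_val_batches(val_tokens: int, val_batch_size: int) -> int:
    if val_tokens % val_batch_size != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
    return val_tokens // val_batch_size  # integer division drops the partial batch

n_batches = plan_val_batches(491520, 65536)  # 7 full batches; 32768 tokens unused
```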
+[2025-09-06 00:48:54] [Rank 0] step:2601/10000 train_time:128268ms step_avg:49.31ms
+[2025-09-06 00:48:55] [Rank 0] step:2621/10000 train_time:129006ms step_avg:49.22ms
+[2025-09-06 00:48:56] [Rank 0] step:2641/10000 train_time:129745ms step_avg:49.13ms
+[2025-09-06 00:48:57] [Rank 0] step:2661/10000 train_time:130488ms step_avg:49.04ms
+[2025-09-06 00:48:57] [Rank 0] step:2681/10000 train_time:131227ms step_avg:48.95ms
+[2025-09-06 00:48:58] [Rank 0] step:2701/10000 train_time:131965ms step_avg:48.86ms
+[2025-09-06 00:48:59] [Rank 0] step:2721/10000 train_time:132704ms step_avg:48.77ms
+[2025-09-06 00:48:59] [Rank 0] step:2741/10000 train_time:133442ms step_avg:48.68ms
+[2025-09-06 00:49:00] [Rank 0] step:2761/10000 train_time:134179ms step_avg:48.60ms
+[2025-09-06 00:49:01] [Rank 0] step:2781/10000 train_time:134919ms step_avg:48.51ms
+[2025-09-06 00:49:02] [Rank 0] step:2801/10000 train_time:135657ms step_avg:48.43ms
+[2025-09-06 00:49:03] [Rank 0] step:2821/10000 train_time:137020ms step_avg:48.57ms
+[2025-09-06 00:49:04] [Rank 0] step:2841/10000 train_time:137759ms step_avg:48.49ms
+[2025-09-06 00:49:05] [Rank 0] step:2861/10000 train_time:138498ms step_avg:48.41ms
+[2025-09-06 00:49:05] [Rank 0] step:2881/10000 train_time:139237ms step_avg:48.33ms
+[2025-09-06 00:49:06] [Rank 0] step:2901/10000 train_time:139976ms step_avg:48.25ms
+[2025-09-06 00:49:07] [Rank 0] step:2921/10000 train_time:140715ms step_avg:48.17ms
+[2025-09-06 00:49:08] [Rank 0] step:2941/10000 train_time:141454ms step_avg:48.10ms
+[2025-09-06 00:49:08] [Rank 0] step:2961/10000 train_time:142192ms step_avg:48.02ms
+[2025-09-06 00:49:09] [Rank 0] step:2981/10000 train_time:142930ms step_avg:47.95ms
+[2025-09-06 00:49:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:49:10] [Rank 0] PRINT: step:3000/10000 train_loss:2.7088 val_loss:2.6371 train_time:143749ms step_avg:47.92ms
+[2025-09-06 00:49:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:49:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:50:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:50:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:50:31] [Rank 0] Total Loss: 4.9294
+[2025-09-06 00:50:31] [Rank 0] Total FTA (Unweighted): 0.1906
+[2025-09-06 00:50:31] [Rank 0] Total FTA (Weighted): 0.1906
+[2025-09-06 00:50:31] [Rank 0] Group 0 Loss: 3.2005
+[2025-09-06 00:50:31] [Rank 0] Group 1 Loss: 3.2343
+[2025-09-06 00:50:31] [Rank 0] Group 2 Loss: 3.3665
+[2025-09-06 00:50:31] [Rank 0] Group 3 Loss: 3.7617
+[2025-09-06 00:50:31] [Rank 0] Group 4 Loss: 4.3762
+[2025-09-06 00:50:31] [Rank 0] Group 5 Loss: 4.9015
+[2025-09-06 00:50:31] [Rank 0] Group 6 Loss: 5.1830
+[2025-09-06 00:50:31] [Rank 0] Group 7 Loss: 5.3064
+[2025-09-06 00:50:31] [Rank 0] Group 8 Loss: 5.5788
+[2025-09-06 00:50:31] [Rank 0] Group 9 Loss: 5.7058
+[2025-09-06 00:50:31] [Rank 0] Group 10 Loss: 5.7176
+[2025-09-06 00:50:31] [Rank 0] Group 11 Loss: 5.7505
+[2025-09-06 00:50:31] [Rank 0] Group 12 Loss: 5.6745
+[2025-09-06 00:50:31] [Rank 0] Group 13 Loss: 5.6834
+[2025-09-06 00:50:31] [Rank 0] Group 14 Loss: 5.7429
+[2025-09-06 00:50:31] [Rank 0] Group 15 Loss: 5.6874
+[2025-09-06 00:50:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:50:31] [Rank 0] Group 1 FTA: 0.3800
+[2025-09-06 00:50:31] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:50:31] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 00:50:31] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 00:50:31] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 00:50:31] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-06 00:50:31] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 00:50:31] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 00:50:31] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 00:50:31] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 00:50:31] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 00:50:31] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:50:31] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-06 00:50:31] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:50:31] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 00:50:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:50:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:50:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:50:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:50:33] [Rank 0] step:3001/10000 train_time:143758ms step_avg:47.90ms
+[2025-09-06 00:50:34] [Rank 0] step:3021/10000 train_time:144538ms step_avg:47.84ms
+[2025-09-06 00:50:34] [Rank 0] step:3041/10000 train_time:145276ms step_avg:47.77ms
+[2025-09-06 00:50:35] [Rank 0] step:3061/10000 train_time:146013ms step_avg:47.70ms
+[2025-09-06 00:50:36] [Rank 0] step:3081/10000 train_time:146750ms step_avg:47.63ms
+[2025-09-06 00:50:37] [Rank 0] step:3101/10000 train_time:147488ms step_avg:47.56ms
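The step_avg field is cumulative rather than a moving average: it is the total train_time divided by the current step, which is why it declines smoothly as the early, slower steps are amortized. At step 3101, 147488 ms / 3101 ≈ 47.56 ms, matching the line above. A one-line sketch (names hypothetical):

```python
# Cumulative average step time, as reported in the step lines above.
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

assert round(step_avg_ms(147488, 3101), 2) == 47.56  # step:3101
assert round(step_avg_ms(124635, 2500), 2) == 49.85  # step:2500
```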
+[2025-09-06 00:50:37] [Rank 0] step:3121/10000 train_time:148227ms step_avg:47.49ms
+[2025-09-06 00:50:38] [Rank 0] step:3141/10000 train_time:148965ms step_avg:47.43ms
+[2025-09-06 00:50:39] [Rank 0] step:3161/10000 train_time:149703ms step_avg:47.36ms
+[2025-09-06 00:50:40] [Rank 0] step:3181/10000 train_time:150441ms step_avg:47.29ms
+[2025-09-06 00:50:40] [Rank 0] step:3201/10000 train_time:151179ms step_avg:47.23ms
+[2025-09-06 00:50:41] [Rank 0] step:3221/10000 train_time:151917ms step_avg:47.16ms
+[2025-09-06 00:50:42] [Rank 0] step:3241/10000 train_time:152654ms step_avg:47.10ms
+[2025-09-06 00:50:42] [Rank 0] step:3261/10000 train_time:153399ms step_avg:47.04ms
+[2025-09-06 00:50:43] [Rank 0] step:3281/10000 train_time:154137ms step_avg:46.98ms
+[2025-09-06 00:50:44] [Rank 0] step:3301/10000 train_time:154874ms step_avg:46.92ms
+[2025-09-06 00:50:45] [Rank 0] step:3321/10000 train_time:155613ms step_avg:46.86ms
+[2025-09-06 00:50:45] [Rank 0] step:3341/10000 train_time:156350ms step_avg:46.80ms
+[2025-09-06 00:50:46] [Rank 0] step:3361/10000 train_time:157089ms step_avg:46.74ms
+[2025-09-06 00:50:47] [Rank 0] step:3381/10000 train_time:157827ms step_avg:46.68ms
+[2025-09-06 00:50:48] [Rank 0] step:3401/10000 train_time:158565ms step_avg:46.62ms
+[2025-09-06 00:50:48] [Rank 0] step:3421/10000 train_time:159303ms step_avg:46.57ms
+[2025-09-06 00:50:49] [Rank 0] step:3441/10000 train_time:160041ms step_avg:46.51ms
+[2025-09-06 00:50:50] [Rank 0] step:3461/10000 train_time:160779ms step_avg:46.45ms
+[2025-09-06 00:50:51] [Rank 0] step:3481/10000 train_time:161517ms step_avg:46.40ms
+[2025-09-06 00:50:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:50:52] [Rank 0] PRINT: step:3500/10000 train_loss:2.5957 val_loss:2.5464 train_time:162335ms step_avg:46.38ms
+[2025-09-06 00:50:52] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:50:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:52:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:52:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:52:13] [Rank 0] Total Loss: 4.9034
+[2025-09-06 00:52:13] [Rank 0] Total FTA (Unweighted): 0.2050
+[2025-09-06 00:52:13] [Rank 0] Total FTA (Weighted): 0.2050
+[2025-09-06 00:52:13] [Rank 0] Group 0 Loss: 3.2626
+[2025-09-06 00:52:13] [Rank 0] Group 1 Loss: 3.2644
+[2025-09-06 00:52:13] [Rank 0] Group 2 Loss: 3.3636
+[2025-09-06 00:52:13] [Rank 0] Group 3 Loss: 3.7352
+[2025-09-06 00:52:13] [Rank 0] Group 4 Loss: 4.3164
+[2025-09-06 00:52:13] [Rank 0] Group 5 Loss: 4.8369
+[2025-09-06 00:52:13] [Rank 0] Group 6 Loss: 5.1288
+[2025-09-06 00:52:13] [Rank 0] Group 7 Loss: 5.2385
+[2025-09-06 00:52:13] [Rank 0] Group 8 Loss: 5.5329
+[2025-09-06 00:52:13] [Rank 0] Group 9 Loss: 5.6758
+[2025-09-06 00:52:13] [Rank 0] Group 10 Loss: 5.6926
+[2025-09-06 00:52:13] [Rank 0] Group 11 Loss: 5.7302
+[2025-09-06 00:52:13] [Rank 0] Group 12 Loss: 5.6434
+[2025-09-06 00:52:13] [Rank 0] Group 13 Loss: 5.6512
+[2025-09-06 00:52:13] [Rank 0] Group 14 Loss: 5.7121
+[2025-09-06 00:52:13] [Rank 0] Group 15 Loss: 5.6695
+[2025-09-06 00:52:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:52:13] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 00:52:13] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:52:13] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:52:13] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 00:52:13] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 00:52:13] [Rank 0] Group 6 FTA: 0.1200
+[2025-09-06 00:52:13] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 00:52:13] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 00:52:13] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 00:52:13] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 00:52:13] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 00:52:13] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 00:52:13] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-06 00:52:13] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:52:13] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:52:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:52:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:52:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:52:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:52:14] [Rank 0] step:3501/10000 train_time:162344ms step_avg:46.37ms
+[2025-09-06 00:52:15] [Rank 0] step:3521/10000 train_time:163029ms step_avg:46.30ms
+[2025-09-06 00:52:16] [Rank 0] step:3541/10000 train_time:163767ms step_avg:46.25ms
+[2025-09-06 00:52:16] [Rank 0] step:3561/10000 train_time:164504ms step_avg:46.20ms
+[2025-09-06 00:52:17] [Rank 0] step:3581/10000 train_time:165243ms step_avg:46.14ms
+[2025-09-06 00:52:18] [Rank 0] step:3601/10000 train_time:165981ms step_avg:46.09ms
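In every detailed evaluation the weighted and unweighted total FTA agree (0.2050 at step 3500 above). That is expected here: the fixed eval set holds 100 samples for each of the 16 groups (1600 in total), so a sample-weighted mean reduces to the plain mean of the per-group accuracies. A sketch using the step-3500 values (container names hypothetical):

```python
# Why "Total FTA (Weighted)" equals "Total FTA (Unweighted)" in this run:
# the fixed-eval set is balanced at 100 samples per group.
group_fta = [1.00, 0.52, 0.18, 0.17, 0.09, 0.18, 0.12, 0.10,
             0.19, 0.11, 0.13, 0.11, 0.08, 0.10, 0.12, 0.08]  # step 3500
group_counts = [100] * 16  # 16 groups x 100 samples = 1600

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * c for f, c in zip(group_fta, group_counts)) / sum(group_counts)
assert abs(unweighted - weighted) < 1e-9  # uniform counts make them coincide
print(round(unweighted, 4))  # 0.205, as logged at step 3500
```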
+[2025-09-06 00:52:19] [Rank 0] step:3621/10000 train_time:166720ms step_avg:46.04ms
+[2025-09-06 00:52:20] [Rank 0] step:3641/10000 train_time:168067ms step_avg:46.16ms
+[2025-09-06 00:52:21] [Rank 0] step:3661/10000 train_time:168805ms step_avg:46.11ms
+[2025-09-06 00:52:21] [Rank 0] step:3681/10000 train_time:169544ms step_avg:46.06ms
+[2025-09-06 00:52:22] [Rank 0] step:3701/10000 train_time:170281ms step_avg:46.01ms
+[2025-09-06 00:52:23] [Rank 0] step:3721/10000 train_time:171019ms step_avg:45.96ms
+[2025-09-06 00:52:24] [Rank 0] step:3741/10000 train_time:171758ms step_avg:45.91ms
+[2025-09-06 00:52:24] [Rank 0] step:3761/10000 train_time:172497ms step_avg:45.86ms
+[2025-09-06 00:52:25] [Rank 0] step:3781/10000 train_time:173236ms step_avg:45.82ms
+[2025-09-06 00:52:26] [Rank 0] step:3801/10000 train_time:173975ms step_avg:45.77ms
+[2025-09-06 00:52:27] [Rank 0] step:3821/10000 train_time:174713ms step_avg:45.72ms
+[2025-09-06 00:52:27] [Rank 0] step:3841/10000 train_time:175451ms step_avg:45.68ms
+[2025-09-06 00:52:28] [Rank 0] step:3861/10000 train_time:176189ms step_avg:45.63ms
+[2025-09-06 00:52:29] [Rank 0] step:3881/10000 train_time:176928ms step_avg:45.59ms
+[2025-09-06 00:52:30] [Rank 0] step:3901/10000 train_time:177667ms step_avg:45.54ms
+[2025-09-06 00:52:30] [Rank 0] step:3921/10000 train_time:178405ms step_avg:45.50ms
+[2025-09-06 00:52:31] [Rank 0] step:3941/10000 train_time:179143ms step_avg:45.46ms
+[2025-09-06 00:52:32] [Rank 0] step:3961/10000 train_time:179881ms step_avg:45.41ms
+[2025-09-06 00:52:32] [Rank 0] step:3981/10000 train_time:180618ms step_avg:45.37ms
+[2025-09-06 00:52:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:52:34] [Rank 0] PRINT: step:4000/10000 train_loss:2.5153 val_loss:2.4676 train_time:181437ms step_avg:45.36ms
+[2025-09-06 00:52:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:52:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:53:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:53:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:53:55] [Rank 0] Total Loss: 4.8983
+[2025-09-06 00:53:55] [Rank 0] Total FTA (Unweighted): 0.2225
+[2025-09-06 00:53:55] [Rank 0] Total FTA (Weighted): 0.2225
+[2025-09-06 00:53:55] [Rank 0] Group 0 Loss: 3.2692
+[2025-09-06 00:53:56] [Rank 0] Group 1 Loss: 3.3551
+[2025-09-06 00:53:56] [Rank 0] Group 2 Loss: 3.3986
+[2025-09-06 00:53:56] [Rank 0] Group 3 Loss: 3.8106
+[2025-09-06 00:53:56] [Rank 0] Group 4 Loss: 4.2558
+[2025-09-06 00:53:56] [Rank 0] Group 5 Loss: 4.7771
+[2025-09-06 00:53:56] [Rank 0] Group 6 Loss: 5.0894
+[2025-09-06 00:53:56] [Rank 0] Group 7 Loss: 5.2235
+[2025-09-06 00:53:56] [Rank 0] Group 8 Loss: 5.5064
+[2025-09-06 00:53:56] [Rank 0] Group 9 Loss: 5.6531
+[2025-09-06 00:53:56] [Rank 0] Group 10 Loss: 5.7112
+[2025-09-06 00:53:56] [Rank 0] Group 11 Loss: 5.7323
+[2025-09-06 00:53:56] [Rank 0] Group 12 Loss: 5.6308
+[2025-09-06 00:53:56] [Rank 0] Group 13 Loss: 5.6257
+[2025-09-06 00:53:56] [Rank 0] Group 14 Loss: 5.6896
+[2025-09-06 00:53:56] [Rank 0] Group 15 Loss: 5.6436
+[2025-09-06 00:53:56] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:53:56] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 00:53:56] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:53:56] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:53:56] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 00:53:56] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 00:53:56] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 00:53:56] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-06 00:53:56] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 00:53:56] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 00:53:56] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 00:53:56] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 00:53:56] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 00:53:56] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 00:53:56] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 00:53:56] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 00:53:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:53:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:53:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:53:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:53:57] [Rank 0] step:4001/10000 train_time:181445ms step_avg:45.35ms
+[2025-09-06 00:53:58] [Rank 0] step:4021/10000 train_time:182724ms step_avg:45.44ms
+[2025-09-06 00:53:59] [Rank 0] step:4041/10000 train_time:183463ms step_avg:45.40ms
+[2025-09-06 00:54:00] [Rank 0] step:4061/10000 train_time:184202ms step_avg:45.36ms
+[2025-09-06 00:54:01] [Rank 0] step:4081/10000 train_time:184940ms step_avg:45.32ms
+[2025-09-06 00:54:01] [Rank 0] step:4101/10000 train_time:185678ms step_avg:45.28ms
+[2025-09-06 00:54:02] [Rank 0] step:4121/10000 train_time:186418ms step_avg:45.24ms
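Each evaluation ends by rewriting the same four PNGs in the run directory, so the saved curves always show the full history up to the latest step. A minimal sketch of that overwrite-on-update pattern with matplotlib (the plotting code is an assumption; only the output filename and loss values come from the log):

```python
# Sketch: redraw and overwrite a total-loss curve after each detailed eval.
import os
import matplotlib
matplotlib.use("Agg")  # headless backend for training jobs
import matplotlib.pyplot as plt

def save_total_loss_curve(history, run_dir):
    """history: dict mapping eval step -> total detailed loss."""
    steps = sorted(history)
    fig, ax = plt.subplots()
    ax.plot(steps, [history[s] for s in steps], marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel("total detailed loss")
    out = os.path.join(run_dir, "total_loss_curve.png")
    fig.savefig(out)  # same path every time, so the PNG is replaced in place
    plt.close(fig)
    return out

# Eval values logged so far in this run:
save_total_loss_curve({2500: 4.9796, 3000: 4.9294, 3500: 4.9034, 4000: 4.8983},
                      "logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44")
```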
+[2025-09-06 00:54:03] [Rank 0] step:4141/10000 train_time:187158ms step_avg:45.20ms
+[2025-09-06 00:54:04] [Rank 0] step:4161/10000 train_time:187895ms step_avg:45.16ms
+[2025-09-06 00:54:04] [Rank 0] step:4181/10000 train_time:188633ms step_avg:45.12ms
+[2025-09-06 00:54:05] [Rank 0] step:4201/10000 train_time:189370ms step_avg:45.08ms
+[2025-09-06 00:54:06] [Rank 0] step:4221/10000 train_time:190109ms step_avg:45.04ms
+[2025-09-06 00:54:07] [Rank 0] step:4241/10000 train_time:190847ms step_avg:45.00ms
+[2025-09-06 00:54:07] [Rank 0] step:4261/10000 train_time:191586ms step_avg:44.96ms
+[2025-09-06 00:54:08] [Rank 0] step:4281/10000 train_time:192323ms step_avg:44.92ms
+[2025-09-06 00:54:09] [Rank 0] step:4301/10000 train_time:193062ms step_avg:44.89ms
+[2025-09-06 00:54:10] [Rank 0] step:4321/10000 train_time:193801ms step_avg:44.85ms
+[2025-09-06 00:54:10] [Rank 0] step:4341/10000 train_time:194539ms step_avg:44.81ms
+[2025-09-06 00:54:11] [Rank 0] step:4361/10000 train_time:195278ms step_avg:44.78ms
+[2025-09-06 00:54:12] [Rank 0] step:4381/10000 train_time:196017ms step_avg:44.74ms
+[2025-09-06 00:54:13] [Rank 0] step:4401/10000 train_time:196755ms step_avg:44.71ms
+[2025-09-06 00:54:13] [Rank 0] step:4421/10000 train_time:197493ms step_avg:44.67ms
+[2025-09-06 00:54:14] [Rank 0] step:4441/10000 train_time:198230ms step_avg:44.64ms
+[2025-09-06 00:54:15] [Rank 0] step:4461/10000 train_time:198968ms step_avg:44.60ms
+[2025-09-06 00:54:15] [Rank 0] step:4481/10000 train_time:199707ms step_avg:44.57ms
+[2025-09-06 00:54:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:54:17] [Rank 0] PRINT: step:4500/10000 train_loss:2.4475 val_loss:2.4069 train_time:200526ms step_avg:44.56ms
+[2025-09-06 00:54:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:54:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:55:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:55:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:55:38] [Rank 0] Total Loss: 4.7817
+[2025-09-06 00:55:38] [Rank 0] Total FTA (Unweighted): 0.2356
+[2025-09-06 00:55:38] [Rank 0] Total FTA (Weighted): 0.2356
+[2025-09-06 00:55:38] [Rank 0] Group 0 Loss: 3.2696
+[2025-09-06 00:55:38] [Rank 0] Group 1 Loss: 3.2371
+[2025-09-06 00:55:38] [Rank 0] Group 2 Loss: 3.3666
+[2025-09-06 00:55:38] [Rank 0] Group 3 Loss: 3.7098
+[2025-09-06 00:55:38] [Rank 0] Group 4 Loss: 4.1123
+[2025-09-06 00:55:38] [Rank 0] Group 5 Loss: 4.6645
+[2025-09-06 00:55:38] [Rank 0] Group 6 Loss: 4.9409
+[2025-09-06 00:55:38] [Rank 0] Group 7 Loss: 5.0989
+[2025-09-06 00:55:38] [Rank 0] Group 8 Loss: 5.3554
+[2025-09-06 00:55:38] [Rank 0] Group 9 Loss: 5.5060
+[2025-09-06 00:55:38] [Rank 0] Group 10 Loss: 5.5484
+[2025-09-06 00:55:38] [Rank 0] Group 11 Loss: 5.5826
+[2025-09-06 00:55:38] [Rank 0] Group 12 Loss: 5.5177
+[2025-09-06 00:55:38] [Rank 0] Group 13 Loss: 5.5227
+[2025-09-06 00:55:38] [Rank 0] Group 14 Loss: 5.5584
+[2025-09-06 00:55:38] [Rank 0] Group 15 Loss: 5.5166
+[2025-09-06 00:55:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:55:38] [Rank 0] Group 1 FTA: 0.7000
+[2025-09-06 00:55:38] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 00:55:38] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:55:38] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 00:55:38] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 00:55:38] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 00:55:38] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 00:55:38] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:55:38] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 00:55:38] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-06 00:55:38] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 00:55:38] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 00:55:38] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 00:55:38] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 00:55:38] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:55:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:55:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:55:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:55:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:55:39] [Rank 0] step:4501/10000 train_time:200535ms step_avg:44.55ms
+[2025-09-06 00:55:40] [Rank 0] step:4521/10000 train_time:201213ms step_avg:44.51ms
+[2025-09-06 00:55:41] [Rank 0] step:4541/10000 train_time:201951ms step_avg:44.47ms
+[2025-09-06 00:55:42] [Rank 0] step:4561/10000 train_time:202689ms step_avg:44.44ms
+[2025-09-06 00:55:42] [Rank 0] step:4581/10000 train_time:203428ms step_avg:44.41ms
+[2025-09-06 00:55:43] [Rank 0] step:4601/10000 train_time:204166ms step_avg:44.37ms
+[2025-09-06 00:55:44] [Rank 0] step:4621/10000 train_time:204904ms step_avg:44.34ms
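Because the run's results live only in these text logs, the (step, train_loss, val_loss) points are easiest to recover with a regex over the PRINT lines; a sketch (the pattern is written against the exact format above, the helper name is hypothetical):

```python
# Sketch: pull (step, train_loss, val_loss) triples out of the log text.
import re

PRINT_RE = re.compile(r"PRINT: step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_eval_points(log_text):
    return [(int(s), float(tr), float(va)) for s, tr, va in PRINT_RE.findall(log_text)]

line = ("[2025-09-06 00:54:17] [Rank 0] PRINT: step:4500/10000 "
        "train_loss:2.4475 val_loss:2.4069 train_time:200526ms step_avg:44.56ms")
assert parse_eval_points(line) == [(4500, 2.4475, 2.4069)]
```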
+[2025-09-06 00:55:45] [Rank 0] step:4641/10000 train_time:205642ms step_avg:44.31ms
+[2025-09-06 00:55:45] [Rank 0] step:4661/10000 train_time:206380ms step_avg:44.28ms
+[2025-09-06 00:55:46] [Rank 0] step:4681/10000 train_time:207118ms step_avg:44.25ms
+[2025-09-06 00:55:47] [Rank 0] step:4701/10000 train_time:207972ms step_avg:44.24ms
+[2025-09-06 00:55:48] [Rank 0] step:4721/10000 train_time:208711ms step_avg:44.21ms
+[2025-09-06 00:55:48] [Rank 0] step:4741/10000 train_time:209449ms step_avg:44.18ms
+[2025-09-06 00:55:49] [Rank 0] step:4761/10000 train_time:210186ms step_avg:44.15ms
+[2025-09-06 00:55:50] [Rank 0] step:4781/10000 train_time:211070ms step_avg:44.15ms
+[2025-09-06 00:55:51] [Rank 0] step:4801/10000 train_time:211808ms step_avg:44.12ms
+[2025-09-06 00:55:51] [Rank 0] step:4821/10000 train_time:212547ms step_avg:44.09ms
+[2025-09-06 00:55:53] [Rank 0] step:4841/10000 train_time:213592ms step_avg:44.12ms
+[2025-09-06 00:55:53] [Rank 0] step:4861/10000 train_time:214330ms step_avg:44.09ms
+[2025-09-06 00:55:54] [Rank 0] step:4881/10000 train_time:215067ms step_avg:44.06ms
+[2025-09-06 00:55:55] [Rank 0] step:4901/10000 train_time:215804ms step_avg:44.03ms
+[2025-09-06 00:55:55] [Rank 0] step:4921/10000 train_time:216542ms step_avg:44.00ms
+[2025-09-06 00:55:56] [Rank 0] step:4941/10000 train_time:217280ms step_avg:43.98ms
+[2025-09-06 00:55:57] [Rank 0] step:4961/10000 train_time:218017ms step_avg:43.95ms
+[2025-09-06 00:55:58] [Rank 0] step:4981/10000 train_time:218754ms step_avg:43.92ms
+[2025-09-06 00:55:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:55:59] [Rank 0] PRINT: step:5000/10000 train_loss:2.3911 val_loss:2.3616 train_time:219571ms step_avg:43.91ms
+[2025-09-06 00:55:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:55:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:57:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:57:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:57:20] [Rank 0] Total Loss: 4.6796
+[2025-09-06 00:57:20] [Rank 0] Total FTA (Unweighted): 0.2619
+[2025-09-06 00:57:20] [Rank 0] Total FTA (Weighted): 0.2619
+[2025-09-06 00:57:20] [Rank 0] Group 0 Loss: 3.2447
+[2025-09-06 00:57:20] [Rank 0] Group 1 Loss: 3.2394
+[2025-09-06 00:57:20] [Rank 0] Group 2 Loss: 3.2296
+[2025-09-06 00:57:20] [Rank 0] Group 3 Loss: 3.5667
+[2025-09-06 00:57:20] [Rank 0] Group 4 Loss: 3.9687
+[2025-09-06 00:57:20] [Rank 0] Group 5 Loss: 4.5752
+[2025-09-06 00:57:20] [Rank 0] Group 6 Loss: 4.8465
+[2025-09-06 00:57:20] [Rank 0] Group 7 Loss: 4.9729
+[2025-09-06 00:57:20] [Rank 0] Group 8 Loss: 5.2477
+[2025-09-06 00:57:20] [Rank 0] Group 9 Loss: 5.4001
+[2025-09-06 00:57:20] [Rank 0] Group 10 Loss: 5.4335
+[2025-09-06 00:57:20] [Rank 0] Group 11 Loss: 5.4387
+[2025-09-06 00:57:20] [Rank 0] Group 12 Loss: 5.4118
+[2025-09-06 00:57:20] [Rank 0] Group 13 Loss: 5.4283
+[2025-09-06 00:57:20] [Rank 0] Group 14 Loss: 5.4674
+[2025-09-06 00:57:20] [Rank 0] Group 15 Loss: 5.4030
+[2025-09-06 00:57:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:57:20] [Rank 0] Group 1 FTA: 0.8700
+[2025-09-06 00:57:20] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 00:57:20] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:57:20] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 00:57:20] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-06 00:57:20] [Rank 0] Group 6 FTA: 0.1600
+[2025-09-06 00:57:20] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 00:57:20] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 00:57:20] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 00:57:20] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 00:57:20] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-06 00:57:20] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 00:57:20] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-06 00:57:20] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 00:57:20] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 00:57:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:57:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:57:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:57:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:57:22] [Rank 0] step:5001/10000 train_time:219580ms step_avg:43.91ms
+[2025-09-06 00:57:22] [Rank 0] step:5021/10000 train_time:220264ms step_avg:43.87ms
+[2025-09-06 00:57:23] [Rank 0] step:5041/10000 train_time:221003ms step_avg:43.84ms
+[2025-09-06 00:57:24] [Rank 0] step:5061/10000 train_time:221742ms step_avg:43.81ms
+[2025-09-06 00:57:25] [Rank 0] step:5081/10000 train_time:222481ms step_avg:43.79ms
+[2025-09-06 00:57:25] [Rank 0] step:5101/10000 train_time:223219ms step_avg:43.76ms
+[2025-09-06 00:57:26] [Rank 0] step:5121/10000 train_time:223959ms step_avg:43.73ms
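"Fixed-eval set loaded with 1600 samples" corresponds to the run's fixed_eval_indices.json, which maps each group id to a frozen list of 100 dataset indices (16 groups × 100 = 1600), so every detailed evaluation scores exactly the same samples. A sketch of loading it (the loader name and path handling are assumptions; the JSON layout matches the file committed with this run):

```python
# Sketch: load the frozen per-group eval indices used by the detailed evaluation.
import json

def load_fixed_eval_indices(path):
    """Returns {group_id: [dataset indices]}; group ids are stored as strings in the JSON."""
    with open(path) as f:
        raw = json.load(f)
    return {int(group): idxs for group, idxs in raw.items()}

indices = load_fixed_eval_indices("fixed_eval_indices.json")
total = sum(len(v) for v in indices.values())
print(f"Fixed-eval set loaded with {total} samples.")  # 1600 in this run
```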
+[2025-09-06 00:57:27] [Rank 0] step:5141/10000 train_time:224697ms step_avg:43.71ms
+[2025-09-06 00:57:28] [Rank 0] step:5161/10000 train_time:225435ms step_avg:43.68ms
+[2025-09-06 00:57:28] [Rank 0] step:5181/10000 train_time:226173ms step_avg:43.65ms
+[2025-09-06 00:57:29] [Rank 0] step:5201/10000 train_time:226912ms step_avg:43.63ms
+[2025-09-06 00:57:30] [Rank 0] step:5221/10000 train_time:227650ms step_avg:43.60ms
+[2025-09-06 00:57:31] [Rank 0] step:5241/10000 train_time:228389ms step_avg:43.58ms
+[2025-09-06 00:57:31] [Rank 0] step:5261/10000 train_time:229127ms step_avg:43.55ms
+[2025-09-06 00:57:32] [Rank 0] step:5281/10000 train_time:229864ms step_avg:43.53ms
+[2025-09-06 00:57:33] [Rank 0] step:5301/10000 train_time:230603ms step_avg:43.50ms
+[2025-09-06 00:57:33] [Rank 0] step:5321/10000 train_time:231340ms step_avg:43.48ms
+[2025-09-06 00:57:34] [Rank 0] step:5341/10000 train_time:232078ms step_avg:43.45ms
+[2025-09-06 00:57:35] [Rank 0] step:5361/10000 train_time:232816ms step_avg:43.43ms
+[2025-09-06 00:57:36] [Rank 0] step:5381/10000 train_time:233554ms step_avg:43.40ms
+[2025-09-06 00:57:36] [Rank 0] step:5401/10000 train_time:234292ms step_avg:43.38ms
+[2025-09-06 00:57:37] [Rank 0] step:5421/10000 train_time:235031ms step_avg:43.36ms
+[2025-09-06 00:57:38] [Rank 0] step:5441/10000 train_time:235769ms step_avg:43.33ms
+[2025-09-06 00:57:39] [Rank 0] step:5461/10000 train_time:236508ms step_avg:43.31ms
+[2025-09-06 00:57:39] [Rank 0] step:5481/10000 train_time:237246ms step_avg:43.29ms
+[2025-09-06 00:57:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 00:57:41] [Rank 0] PRINT: step:5500/10000 train_loss:2.3482 val_loss:2.3191 train_time:238065ms step_avg:43.28ms
+[2025-09-06 00:57:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:57:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 00:59:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 00:59:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 00:59:01] [Rank 0] Total Loss: 4.6126
+[2025-09-06 00:59:01] [Rank 0] Total FTA (Unweighted): 0.2744
+[2025-09-06 00:59:01] [Rank 0] Total FTA (Weighted): 0.2744
+[2025-09-06 00:59:01] [Rank 0] Group 0 Loss: 3.1753
+[2025-09-06 00:59:01] [Rank 0] Group 1 Loss: 3.1017
+[2025-09-06 00:59:01] [Rank 0] Group 2 Loss: 3.1866
+[2025-09-06 00:59:01] [Rank 0] Group 3 Loss: 3.4985
+[2025-09-06 00:59:01] [Rank 0] Group 4 Loss: 3.9578
+[2025-09-06 00:59:01] [Rank 0] Group 5 Loss: 4.4684
+[2025-09-06 00:59:01] [Rank 0] Group 6 Loss: 4.7746
+[2025-09-06 00:59:01] [Rank 0] Group 7 Loss: 4.8971
+[2025-09-06 00:59:01] [Rank 0] Group 8 Loss: 5.1870
+[2025-09-06 00:59:01] [Rank 0] Group 9 Loss: 5.3443
+[2025-09-06 00:59:01] [Rank 0] Group 10 Loss: 5.3557
+[2025-09-06 00:59:01] [Rank 0] Group 11 Loss: 5.3917
+[2025-09-06 00:59:01] [Rank 0] Group 12 Loss: 5.3399
+[2025-09-06 00:59:01] [Rank 0] Group 13 Loss: 5.3540
+[2025-09-06 00:59:01] [Rank 0] Group 14 Loss: 5.3982
+[2025-09-06 00:59:01] [Rank 0] Group 15 Loss: 5.3709
+[2025-09-06 00:59:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 00:59:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 00:59:01] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 00:59:01] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 00:59:01] [Rank 0] Group 4 FTA: 0.2200
+[2025-09-06 00:59:01] [Rank 0] Group 5 FTA: 0.2200
+[2025-09-06 00:59:01] [Rank 0] Group 6 FTA: 0.1900
+[2025-09-06 00:59:01] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 00:59:01] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 00:59:01] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 00:59:01] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 00:59:01] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-06 00:59:01] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 00:59:01] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-06 00:59:01] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 00:59:01] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 00:59:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 00:59:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 00:59:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 00:59:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 00:59:03] [Rank 0] step:5501/10000 train_time:238074ms step_avg:43.28ms
+[2025-09-06 00:59:04] [Rank 0] step:5521/10000 train_time:238752ms step_avg:43.24ms
+[2025-09-06 00:59:04] [Rank 0] step:5541/10000 train_time:239490ms step_avg:43.22ms
+[2025-09-06 00:59:05] [Rank 0] step:5561/10000 train_time:240229ms step_avg:43.20ms
+[2025-09-06 00:59:06] [Rank 0] step:5581/10000 train_time:240966ms step_avg:43.18ms
+[2025-09-06 00:59:07] [Rank 0] step:5601/10000 train_time:241705ms step_avg:43.15ms
+[2025-09-06 00:59:07] [Rank 0] step:5621/10000 train_time:242444ms step_avg:43.13ms
+[2025-09-06 00:59:08] [Rank 0] step:5641/10000 train_time:243378ms step_avg:43.14ms
+[2025-09-06 00:59:09] [Rank 0] step:5661/10000 train_time:244115ms step_avg:43.12ms
+[2025-09-06 00:59:10] [Rank 0] step:5681/10000 train_time:244853ms step_avg:43.10ms
+[2025-09-06 00:59:10] [Rank 0] step:5701/10000 train_time:245592ms step_avg:43.08ms
+[2025-09-06 00:59:11] [Rank 0] step:5721/10000 train_time:246331ms step_avg:43.06ms
+[2025-09-06 00:59:12] [Rank 0] step:5741/10000 train_time:247069ms step_avg:43.04ms
+[2025-09-06 00:59:13] [Rank 0] step:5761/10000 train_time:247808ms step_avg:43.01ms
+[2025-09-06 00:59:13] [Rank 0] step:5781/10000 train_time:248547ms step_avg:42.99ms
+[2025-09-06 00:59:14] [Rank 0] step:5801/10000 train_time:249293ms step_avg:42.97ms
+[2025-09-06 00:59:15] [Rank 0] step:5821/10000 train_time:250031ms step_avg:42.95ms
+[2025-09-06 00:59:16] [Rank 0] step:5841/10000 train_time:250768ms step_avg:42.93ms
+[2025-09-06 00:59:16] [Rank 0] step:5861/10000 train_time:251507ms step_avg:42.91ms
+[2025-09-06 00:59:17] [Rank 0] step:5881/10000 train_time:252245ms step_avg:42.89ms
+[2025-09-06 00:59:18] [Rank 0] step:5901/10000 train_time:252983ms step_avg:42.87ms
+[2025-09-06 00:59:19] [Rank 0] step:5921/10000 train_time:253721ms step_avg:42.85ms
+[2025-09-06 00:59:19] [Rank 0] step:5941/10000 train_time:254460ms step_avg:42.83ms
+[2025-09-06 00:59:20] [Rank 0] step:5961/10000 train_time:255197ms step_avg:42.81ms
+[2025-09-06 00:59:21] [Rank 0] step:5981/10000 train_time:255936ms step_avg:42.79ms
+[2025-09-06 00:59:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
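In the step lines, step_avg is consistently the cumulative wall-clock time divided by the step index (e.g. 255936 ms / 5981 ≈ 42.79 ms), which is why it declines slowly as early warm-up cost is amortized. A one-line sketch, assuming only that train_time is cumulative milliseconds:

    # step_avg as printed in the step lines: cumulative time over steps so far.
    def step_avg_ms(train_time_ms: int, step: int) -> float:
        return train_time_ms / step

    assert round(step_avg_ms(255936, 5981), 2) == 42.79  # matches the line above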
+[2025-09-06 00:59:22] [Rank 0] PRINT: step:6000/10000 train_loss:2.3120 val_loss:2.2892 train_time:256754ms step_avg:42.79ms
+[2025-09-06 00:59:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 00:59:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:00:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:00:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:00:43] [Rank 0] Total Loss: 4.6136
+[2025-09-06 01:00:43] [Rank 0] Total FTA (Unweighted): 0.2925
+[2025-09-06 01:00:43] [Rank 0] Total FTA (Weighted): 0.2925
+[2025-09-06 01:00:43] [Rank 0] Group 0 Loss: 3.2007
+[2025-09-06 01:00:43] [Rank 0] Group 1 Loss: 3.1551
+[2025-09-06 01:00:43] [Rank 0] Group 2 Loss: 3.2435
+[2025-09-06 01:00:43] [Rank 0] Group 3 Loss: 3.5396
+[2025-09-06 01:00:43] [Rank 0] Group 4 Loss: 3.9444
+[2025-09-06 01:00:43] [Rank 0] Group 5 Loss: 4.4743
+[2025-09-06 01:00:43] [Rank 0] Group 6 Loss: 4.7584
+[2025-09-06 01:00:43] [Rank 0] Group 7 Loss: 4.8776
+[2025-09-06 01:00:43] [Rank 0] Group 8 Loss: 5.1647
+[2025-09-06 01:00:43] [Rank 0] Group 9 Loss: 5.3383
+[2025-09-06 01:00:43] [Rank 0] Group 10 Loss: 5.3454
+[2025-09-06 01:00:43] [Rank 0] Group 11 Loss: 5.3860
+[2025-09-06 01:00:43] [Rank 0] Group 12 Loss: 5.3350
+[2025-09-06 01:00:43] [Rank 0] Group 13 Loss: 5.3366
+[2025-09-06 01:00:43] [Rank 0] Group 14 Loss: 5.3829
+[2025-09-06 01:00:43] [Rank 0] Group 15 Loss: 5.3347
+[2025-09-06 01:00:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:00:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:00:43] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:00:43] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:00:43] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:00:43] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:00:43] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 01:00:43] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:00:43] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:00:43] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:00:43] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 01:00:43] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 01:00:43] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-06 01:00:43] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-06 01:00:43] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-06 01:00:43] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:00:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 01:00:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 01:00:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 01:00:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 01:00:45] [Rank 0] step:6001/10000 train_time:256763ms step_avg:42.79ms
+[2025-09-06 01:00:46] [Rank 0] step:6021/10000 train_time:258057ms step_avg:42.86ms
+[2025-09-06 01:00:47] [Rank 0] step:6041/10000 train_time:258794ms step_avg:42.84ms
+[2025-09-06 01:00:48] [Rank 0] step:6061/10000 train_time:259531ms step_avg:42.82ms
+[2025-09-06 01:00:48] [Rank 0] step:6081/10000 train_time:260268ms step_avg:42.80ms
+[2025-09-06 01:00:49] [Rank 0] step:6101/10000 train_time:261007ms step_avg:42.78ms
+[2025-09-06 01:00:50] [Rank 0] step:6121/10000 train_time:261746ms step_avg:42.76ms
+[2025-09-06 01:00:51] [Rank 0] step:6141/10000 train_time:262485ms step_avg:42.74ms
+[2025-09-06 01:00:51] [Rank 0] step:6161/10000 train_time:263223ms step_avg:42.72ms
+[2025-09-06 01:00:52] [Rank 0] step:6181/10000 train_time:263962ms step_avg:42.71ms
+[2025-09-06 01:00:53] [Rank 0] step:6201/10000 train_time:264699ms step_avg:42.69ms
+[2025-09-06 01:00:54] [Rank 0] step:6221/10000 train_time:265438ms step_avg:42.67ms
+[2025-09-06 01:00:54] [Rank 0] step:6241/10000 train_time:266177ms step_avg:42.65ms
+[2025-09-06 01:00:55] [Rank 0] step:6261/10000 train_time:266916ms step_avg:42.63ms
+[2025-09-06 01:00:56] [Rank 0] step:6281/10000 train_time:267654ms step_avg:42.61ms
+[2025-09-06 01:00:57] [Rank 0] step:6301/10000 train_time:268393ms step_avg:42.60ms
+[2025-09-06 01:00:57] [Rank 0] step:6321/10000 train_time:269132ms step_avg:42.58ms
+[2025-09-06 01:00:58] [Rank 0] step:6341/10000 train_time:269870ms step_avg:42.56ms
+[2025-09-06 01:00:59] [Rank 0] step:6361/10000 train_time:270609ms step_avg:42.54ms
+[2025-09-06 01:00:59] [Rank 0] step:6381/10000 train_time:271348ms step_avg:42.52ms
+[2025-09-06 01:01:00] [Rank 0] step:6401/10000 train_time:272087ms step_avg:42.51ms
+[2025-09-06 01:01:01] [Rank 0] step:6421/10000 train_time:272826ms step_avg:42.49ms
+[2025-09-06 01:01:02] [Rank 0] step:6441/10000 train_time:273564ms step_avg:42.47ms
+[2025-09-06 01:01:02] [Rank 0] step:6461/10000 train_time:274303ms step_avg:42.46ms
+[2025-09-06 01:01:03] [Rank 0] step:6481/10000 train_time:275189ms step_avg:42.46ms
+[2025-09-06 01:01:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
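In each detailed-evaluation block, Total FTA (Unweighted) is the plain mean of the 16 group FTAs, while Total FTA (Weighted) weights each group by its sample count. With the 1600 fixed-eval samples split evenly across 16 groups (100 each, an assumption consistent with every group FTA landing on a multiple of 0.01), the two coincide; where they differ in the last digit (0.2912 vs 0.2913 at step 6500 below) the underlying value is exactly 0.29125 and the gap is pure rounding. A sketch under that equal-split assumption, using the step-6000 values above:

    # Unweighted vs. weighted total FTA (assumes 16 groups of 100 samples each).
    group_fta = [1.00, 1.00, 0.31, 0.17, 0.25, 0.24, 0.29, 0.13,
                 0.22, 0.14, 0.17, 0.18, 0.16, 0.19, 0.14, 0.09]  # step 6000
    group_sizes = [100] * 16

    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
    print(round(unweighted, 4), round(weighted, 4))  # 0.2925 0.2925, as logged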
+[2025-09-06 01:01:04] [Rank 0] PRINT: step:6500/10000 train_loss:2.2853 val_loss:2.2633 train_time:276007ms step_avg:42.46ms
+[2025-09-06 01:01:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:01:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:02:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:02:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:02:26] [Rank 0] Total Loss: 4.5652
+[2025-09-06 01:02:26] [Rank 0] Total FTA (Unweighted): 0.2912
+[2025-09-06 01:02:26] [Rank 0] Total FTA (Weighted): 0.2913
+[2025-09-06 01:02:26] [Rank 0] Group 0 Loss: 3.2172
+[2025-09-06 01:02:26] [Rank 0] Group 1 Loss: 3.0846
+[2025-09-06 01:02:26] [Rank 0] Group 2 Loss: 3.2025
+[2025-09-06 01:02:26] [Rank 0] Group 3 Loss: 3.4782
+[2025-09-06 01:02:26] [Rank 0] Group 4 Loss: 3.9098
+[2025-09-06 01:02:26] [Rank 0] Group 5 Loss: 4.4194
+[2025-09-06 01:02:26] [Rank 0] Group 6 Loss: 4.6894
+[2025-09-06 01:02:26] [Rank 0] Group 7 Loss: 4.8356
+[2025-09-06 01:02:26] [Rank 0] Group 8 Loss: 5.1310
+[2025-09-06 01:02:26] [Rank 0] Group 9 Loss: 5.2867
+[2025-09-06 01:02:26] [Rank 0] Group 10 Loss: 5.3177
+[2025-09-06 01:02:26] [Rank 0] Group 11 Loss: 5.3255
+[2025-09-06 01:02:26] [Rank 0] Group 12 Loss: 5.2621
+[2025-09-06 01:02:26] [Rank 0] Group 13 Loss: 5.2851
+[2025-09-06 01:02:26] [Rank 0] Group 14 Loss: 5.3211
+[2025-09-06 01:02:26] [Rank 0] Group 15 Loss: 5.2771
+[2025-09-06 01:02:26] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:02:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:02:26] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:02:26] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:02:26] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:02:26] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:02:26] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-06 01:02:26] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:02:26] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:02:26] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:02:26] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-06 01:02:26] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 01:02:26] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-06 01:02:26] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-06 01:02:26] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-06 01:02:26] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:02:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 01:02:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 01:02:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 01:02:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 01:02:28] [Rank 0] step:6501/10000 train_time:276016ms step_avg:42.46ms
+[2025-09-06 01:02:29] [Rank 0] step:6521/10000 train_time:276697ms step_avg:42.43ms
+[2025-09-06 01:02:29] [Rank 0] step:6541/10000 train_time:277434ms step_avg:42.41ms
+[2025-09-06 01:02:30] [Rank 0] step:6561/10000 train_time:278173ms step_avg:42.40ms
+[2025-09-06 01:02:31] [Rank 0] step:6581/10000 train_time:278912ms step_avg:42.38ms
+[2025-09-06 01:02:32] [Rank 0] step:6601/10000 train_time:279650ms step_avg:42.36ms
+[2025-09-06 01:02:32] [Rank 0] step:6621/10000 train_time:280388ms step_avg:42.35ms
+[2025-09-06 01:02:33] [Rank 0] step:6641/10000 train_time:281127ms step_avg:42.33ms
+[2025-09-06 01:02:34] [Rank 0] step:6661/10000 train_time:281865ms step_avg:42.32ms
+[2025-09-06 01:02:34] [Rank 0] step:6681/10000 train_time:282604ms step_avg:42.30ms
+[2025-09-06 01:02:35] [Rank 0] step:6701/10000 train_time:283342ms step_avg:42.28ms
+[2025-09-06 01:02:36] [Rank 0] step:6721/10000 train_time:284080ms step_avg:42.27ms
+[2025-09-06 01:02:37] [Rank 0] step:6741/10000 train_time:284817ms step_avg:42.25ms
+[2025-09-06 01:02:37] [Rank 0] step:6761/10000 train_time:285556ms step_avg:42.24ms
+[2025-09-06 01:02:38] [Rank 0] step:6781/10000 train_time:286302ms step_avg:42.22ms
+[2025-09-06 01:02:39] [Rank 0] step:6801/10000 train_time:287041ms step_avg:42.21ms
+[2025-09-06 01:02:40] [Rank 0] step:6821/10000 train_time:287782ms step_avg:42.19ms
+[2025-09-06 01:02:41] [Rank 0] step:6841/10000 train_time:289138ms step_avg:42.27ms
+[2025-09-06 01:02:42] [Rank 0] step:6861/10000 train_time:289876ms step_avg:42.25ms
+[2025-09-06 01:02:42] [Rank 0] step:6881/10000 train_time:290614ms step_avg:42.23ms
+[2025-09-06 01:02:43] [Rank 0] step:6901/10000 train_time:291353ms step_avg:42.22ms
+[2025-09-06 01:02:44] [Rank 0] step:6921/10000 train_time:292091ms step_avg:42.20ms
+[2025-09-06 01:02:45] [Rank 0] step:6941/10000 train_time:292830ms step_avg:42.19ms
+[2025-09-06 01:02:45] [Rank 0] step:6961/10000 train_time:293567ms step_avg:42.17ms
+[2025-09-06 01:02:46] [Rank 0] step:6981/10000 train_time:294306ms step_avg:42.16ms
+[2025-09-06 01:02:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
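Every evaluation block reloads the same fixed set ("Fixed-eval set loaded with 1600 samples"), so the per-group numbers are directly comparable across steps. A plausible loader sketch, assuming a JSON file that maps group id to a frozen list of sample indices (file name and layout assumed, not shown in this log):

    import json

    def load_fixed_eval(path="fixed_eval_indices.json"):  # assumed file name
        """Return (group_id, sample_index) pairs in a deterministic order."""
        with open(path) as f:
            groups = json.load(f)  # e.g. {"0": [1390189, ...], "1": [...], ...}
        return [(int(gid), idx)
                for gid, idxs in sorted(groups.items(), key=lambda kv: int(kv[0]))
                for idx in idxs]

    samples = load_fixed_eval()
    print(f"Fixed-eval set loaded with {len(samples)} samples.")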
+[2025-09-06 01:02:47] [Rank 0] PRINT: step:7000/10000 train_loss:2.2603 val_loss:2.2405 train_time:295125ms step_avg:42.16ms
+[2025-09-06 01:02:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:02:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:04:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:04:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:04:09] [Rank 0] Total Loss: 4.5990
+[2025-09-06 01:04:09] [Rank 0] Total FTA (Unweighted): 0.2969
+[2025-09-06 01:04:09] [Rank 0] Total FTA (Weighted): 0.2969
+[2025-09-06 01:04:09] [Rank 0] Group 0 Loss: 3.2157
+[2025-09-06 01:04:09] [Rank 0] Group 1 Loss: 3.1487
+[2025-09-06 01:04:09] [Rank 0] Group 2 Loss: 3.2293
+[2025-09-06 01:04:09] [Rank 0] Group 3 Loss: 3.5172
+[2025-09-06 01:04:09] [Rank 0] Group 4 Loss: 3.9237
+[2025-09-06 01:04:09] [Rank 0] Group 5 Loss: 4.4599
+[2025-09-06 01:04:09] [Rank 0] Group 6 Loss: 4.7137
+[2025-09-06 01:04:09] [Rank 0] Group 7 Loss: 4.8806
+[2025-09-06 01:04:09] [Rank 0] Group 8 Loss: 5.1533
+[2025-09-06 01:04:09] [Rank 0] Group 9 Loss: 5.3030
+[2025-09-06 01:04:09] [Rank 0] Group 10 Loss: 5.3438
+[2025-09-06 01:04:09] [Rank 0] Group 11 Loss: 5.3651
+[2025-09-06 01:04:09] [Rank 0] Group 12 Loss: 5.3234
+[2025-09-06 01:04:09] [Rank 0] Group 13 Loss: 5.3243
+[2025-09-06 01:04:09] [Rank 0] Group 14 Loss: 5.3576
+[2025-09-06 01:04:09] [Rank 0] Group 15 Loss: 5.3244
+[2025-09-06 01:04:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:04:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:04:09] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:04:09] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:04:09] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:04:09] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:04:09] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 01:04:09] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:04:09] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:04:09] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:04:09] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-06 01:04:09] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-06 01:04:09] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-06 01:04:09] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-06 01:04:09] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-06 01:04:09] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:04:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 01:04:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 01:04:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 01:04:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 01:04:10] [Rank 0] step:7001/10000 train_time:295134ms step_avg:42.16ms
+[2025-09-06 01:04:11] [Rank 0] step:7021/10000 train_time:295814ms step_avg:42.13ms
+[2025-09-06 01:04:12] [Rank 0] step:7041/10000 train_time:296553ms step_avg:42.12ms
+[2025-09-06 01:04:13] [Rank 0] step:7061/10000 train_time:297404ms step_avg:42.12ms
+[2025-09-06 01:04:13] [Rank 0] step:7081/10000 train_time:298142ms step_avg:42.10ms
+[2025-09-06 01:04:14] [Rank 0] step:7101/10000 train_time:298881ms step_avg:42.09ms
+[2025-09-06 01:04:15] [Rank 0] step:7121/10000 train_time:299737ms step_avg:42.09ms
+[2025-09-06 01:04:16] [Rank 0] step:7141/10000 train_time:300476ms step_avg:42.08ms
+[2025-09-06 01:04:16] [Rank 0] step:7161/10000 train_time:301215ms step_avg:42.06ms
+[2025-09-06 01:04:17] [Rank 0] step:7181/10000 train_time:301954ms step_avg:42.05ms
+[2025-09-06 01:04:18] [Rank 0] step:7201/10000 train_time:302693ms step_avg:42.03ms
+[2025-09-06 01:04:19] [Rank 0] step:7221/10000 train_time:303432ms step_avg:42.02ms
+[2025-09-06 01:04:19] [Rank 0] step:7241/10000 train_time:304170ms step_avg:42.01ms
+[2025-09-06 01:04:20] [Rank 0] step:7261/10000 train_time:304909ms step_avg:41.99ms
+[2025-09-06 01:04:21] [Rank 0] step:7281/10000 train_time:305650ms step_avg:41.98ms
+[2025-09-06 01:04:22] [Rank 0] step:7301/10000 train_time:306390ms step_avg:41.97ms
+[2025-09-06 01:04:22] [Rank 0] step:7321/10000 train_time:307128ms step_avg:41.95ms
+[2025-09-06 01:04:23] [Rank 0] step:7341/10000 train_time:307865ms step_avg:41.94ms
+[2025-09-06 01:04:24] [Rank 0] step:7361/10000 train_time:308604ms step_avg:41.92ms
+[2025-09-06 01:04:25] [Rank 0] step:7381/10000 train_time:309340ms step_avg:41.91ms
+[2025-09-06 01:04:25] [Rank 0] step:7401/10000 train_time:310078ms step_avg:41.90ms
+[2025-09-06 01:04:26] [Rank 0] step:7421/10000 train_time:310814ms step_avg:41.88ms
+[2025-09-06 01:04:27] [Rank 0] step:7441/10000 train_time:311550ms step_avg:41.87ms
+[2025-09-06 01:04:28] [Rank 0] step:7461/10000 train_time:312288ms step_avg:41.86ms
+[2025-09-06 01:04:28] [Rank 0] step:7481/10000 train_time:313025ms step_avg:41.84ms
+[2025-09-06 01:04:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
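The "[✓] ... curve updated and saved" lines show the run overwriting its four PNGs after every detailed evaluation, so the plots on disk always reflect the full history so far. A minimal sketch of that pattern (presumably matplotlib, given the .png outputs; the history structure is assumed):

    import matplotlib
    matplotlib.use("Agg")  # headless backend for a training node
    import matplotlib.pyplot as plt

    def update_per_class_loss_plot(history, out_path):
        # history: {group_id: [(step, loss), ...]}, appended to after each eval
        fig, ax = plt.subplots()
        for gid, points in sorted(history.items()):
            steps, losses = zip(*points)
            ax.plot(steps, losses, label=f"Group {gid}")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval loss")
        ax.legend(fontsize=6, ncol=2)
        fig.savefig(out_path, dpi=150)  # overwrites the old PNG -> "curve updated"
        plt.close(fig)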
+[2025-09-06 01:04:29] [Rank 0] PRINT: step:7500/10000 train_loss:2.2388 val_loss:2.2210 train_time:313843ms step_avg:41.85ms
+[2025-09-06 01:04:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:04:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:05:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:05:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:05:51] [Rank 0] Total Loss: 4.5935
+[2025-09-06 01:05:51] [Rank 0] Total FTA (Unweighted): 0.3000
+[2025-09-06 01:05:51] [Rank 0] Total FTA (Weighted): 0.3000
+[2025-09-06 01:05:51] [Rank 0] Group 0 Loss: 3.2532
+[2025-09-06 01:05:51] [Rank 0] Group 1 Loss: 3.2154
+[2025-09-06 01:05:51] [Rank 0] Group 2 Loss: 3.2626
+[2025-09-06 01:05:51] [Rank 0] Group 3 Loss: 3.5606
+[2025-09-06 01:05:51] [Rank 0] Group 4 Loss: 3.9334
+[2025-09-06 01:05:51] [Rank 0] Group 5 Loss: 4.4368
+[2025-09-06 01:05:51] [Rank 0] Group 6 Loss: 4.6886
+[2025-09-06 01:05:51] [Rank 0] Group 7 Loss: 4.8328
+[2025-09-06 01:05:51] [Rank 0] Group 8 Loss: 5.1235
+[2025-09-06 01:05:51] [Rank 0] Group 9 Loss: 5.2789
+[2025-09-06 01:05:51] [Rank 0] Group 10 Loss: 5.3204
+[2025-09-06 01:05:51] [Rank 0] Group 11 Loss: 5.3457
+[2025-09-06 01:05:51] [Rank 0] Group 12 Loss: 5.2946
+[2025-09-06 01:05:51] [Rank 0] Group 13 Loss: 5.2936
+[2025-09-06 01:05:51] [Rank 0] Group 14 Loss: 5.3413
+[2025-09-06 01:05:51] [Rank 0] Group 15 Loss: 5.3155
+[2025-09-06 01:05:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:05:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:05:51] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:05:51] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:05:51] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:05:51] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:05:51] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 01:05:51] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:05:51] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:05:51] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:05:51] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 01:05:51] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 01:05:51] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-06 01:05:51] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-06 01:05:51] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-06 01:05:51] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 01:05:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png
+[2025-09-06 01:05:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png
+[2025-09-06 01:05:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png
+[2025-09-06 01:05:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png
+[2025-09-06 01:05:53] [Rank 0] step:7501/10000 train_time:313852ms step_avg:41.84ms
+[2025-09-06 01:05:53] [Rank 0] step:7521/10000 train_time:314535ms step_avg:41.82ms
+[2025-09-06 01:05:54] [Rank 0] step:7541/10000 train_time:315273ms step_avg:41.81ms
+[2025-09-06 01:05:55] [Rank 0] step:7561/10000 train_time:316012ms step_avg:41.79ms
+[2025-09-06 01:05:56] [Rank 0] step:7581/10000 train_time:316750ms step_avg:41.78ms
+[2025-09-06 01:05:56] [Rank 0] step:7601/10000 train_time:317489ms step_avg:41.77ms
+[2025-09-06 01:05:57] [Rank 0] step:7621/10000 train_time:318227ms step_avg:41.76ms
+[2025-09-06 01:05:58] [Rank 0] step:7641/10000 train_time:319616ms step_avg:41.83ms
+[2025-09-06 01:05:59] [Rank 0] step:7661/10000 train_time:320325ms step_avg:41.81ms
+[2025-09-06 01:06:00] [Rank 0] step:7681/10000 train_time:321063ms step_avg:41.80ms
+[2025-09-06 01:06:01] [Rank 0] step:7701/10000 train_time:321800ms step_avg:41.79ms
+[2025-09-06 01:06:01] [Rank 0] step:7721/10000 train_time:322538ms step_avg:41.77ms
+[2025-09-06 01:06:02] [Rank 0] step:7741/10000 train_time:323276ms step_avg:41.76ms
+[2025-09-06 01:06:03] [Rank 0] step:7761/10000 train_time:324015ms step_avg:41.75ms
+[2025-09-06 01:06:04] [Rank 0] step:7781/10000 train_time:324754ms step_avg:41.74ms
+[2025-09-06 01:06:04] [Rank 0] step:7801/10000 train_time:325493ms step_avg:41.72ms
+[2025-09-06 01:06:05] [Rank 0] step:7821/10000 train_time:326230ms step_avg:41.71ms
+[2025-09-06 01:06:06] [Rank 0] step:7841/10000 train_time:326968ms step_avg:41.70ms
+[2025-09-06 01:06:07] [Rank 0] step:7861/10000 train_time:327707ms step_avg:41.69ms
+[2025-09-06 01:06:07] [Rank 0] step:7881/10000 train_time:328445ms step_avg:41.68ms
+[2025-09-06 01:06:08] [Rank 0] step:7901/10000 train_time:329183ms step_avg:41.66ms
+[2025-09-06 01:06:09] [Rank 0] step:7921/10000 train_time:329921ms step_avg:41.65ms
+[2025-09-06 01:06:09] [Rank 0] step:7941/10000 train_time:330660ms step_avg:41.64ms
+[2025-09-06 01:06:10] [Rank 0] step:7961/10000 train_time:331398ms step_avg:41.63ms
+[2025-09-06 01:06:11] [Rank 0] step:7981/10000 train_time:332136ms step_avg:41.62ms
+[2025-09-06 01:06:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
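Since every record follows the same "[timestamp] [Rank 0] key:value" shape, the step/timing lines can be recovered into a table with a single regex; a small parsing sketch (log path hypothetical, regex written against the lines shown here):

    import re

    STEP_RE = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")

    def parse_step_lines(lines):
        """Yield (step, cumulative_ms, step_avg_ms) from raw log lines."""
        for line in lines:
            m = STEP_RE.search(line)
            if m:
                yield int(m.group(1)), int(m.group(2)), float(m.group(3))

    with open("train.log") as f:  # hypothetical path to this log
        rows = list(parse_step_lines(f))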
+[2025-09-06 01:06:12] [Rank 0] PRINT: step:8000/10000 train_loss:2.2217 val_loss:2.2061 train_time:332955ms step_avg:41.62ms +[2025-09-06 01:06:12] [Rank 0] PRINT: step:8000/10000 train_loss:2.2217 val_loss:2.2061 train_time:332955ms step_avg:41.62ms +[2025-09-06 01:06:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:06:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:06:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:06:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:07:34] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:07:34] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:07:34] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:07:34] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:07:34] [Rank 0] Total Loss: 4.5656 +[2025-09-06 01:07:34] [Rank 0] Total Loss: 4.5656 +[2025-09-06 01:07:34] [Rank 0] Total FTA (Unweighted): 0.3062 +[2025-09-06 01:07:34] [Rank 0] Total FTA (Unweighted): 0.3062 +[2025-09-06 01:07:34] [Rank 0] Total FTA (Weighted): 0.3063 +[2025-09-06 01:07:34] [Rank 0] Total FTA (Weighted): 0.3063 +[2025-09-06 01:07:34] [Rank 0] Group 0 Loss: 3.2896 +[2025-09-06 01:07:34] [Rank 0] Group 0 Loss: 3.2896 +[2025-09-06 01:07:34] [Rank 0] Group 1 Loss: 3.1644 +[2025-09-06 01:07:34] [Rank 0] Group 1 Loss: 3.1644 +[2025-09-06 01:07:34] [Rank 0] Group 2 Loss: 3.2369 +[2025-09-06 01:07:34] [Rank 0] Group 2 Loss: 3.2369 +[2025-09-06 01:07:34] [Rank 0] Group 3 Loss: 3.5391 +[2025-09-06 01:07:34] [Rank 0] Group 3 Loss: 3.5391 +[2025-09-06 01:07:34] [Rank 0] Group 4 Loss: 3.8874 +[2025-09-06 01:07:34] [Rank 0] Group 4 Loss: 3.8874 +[2025-09-06 01:07:34] [Rank 0] Group 5 Loss: 4.4024 +[2025-09-06 01:07:34] [Rank 0] Group 5 Loss: 4.4024 +[2025-09-06 01:07:34] [Rank 0] Group 6 Loss: 4.6713 +[2025-09-06 01:07:34] [Rank 0] Group 6 Loss: 4.6713 +[2025-09-06 01:07:34] [Rank 0] Group 7 Loss: 4.8110 +[2025-09-06 01:07:34] [Rank 0] Group 7 Loss: 4.8110 +[2025-09-06 01:07:34] [Rank 0] Group 8 Loss: 5.0855 +[2025-09-06 01:07:34] [Rank 0] Group 8 Loss: 5.0855 +[2025-09-06 01:07:34] [Rank 0] Group 9 Loss: 5.2565 +[2025-09-06 01:07:34] [Rank 0] Group 9 Loss: 5.2565 +[2025-09-06 01:07:34] [Rank 0] Group 10 Loss: 5.3001 +[2025-09-06 01:07:34] [Rank 0] Group 10 Loss: 5.3001 +[2025-09-06 01:07:34] [Rank 0] Group 11 Loss: 5.3008 +[2025-09-06 01:07:34] [Rank 0] Group 11 Loss: 5.3008 +[2025-09-06 01:07:34] [Rank 0] Group 12 Loss: 5.2698 +[2025-09-06 01:07:34] [Rank 0] Group 12 Loss: 5.2698 +[2025-09-06 01:07:34] [Rank 0] Group 13 Loss: 5.2661 +[2025-09-06 01:07:34] [Rank 0] Group 13 Loss: 5.2661 +[2025-09-06 01:07:34] [Rank 0] Group 14 Loss: 5.3077 +[2025-09-06 01:07:34] [Rank 0] Group 14 Loss: 5.3077 +[2025-09-06 01:07:34] [Rank 0] Group 15 Loss: 5.2617 +[2025-09-06 01:07:34] [Rank 0] Group 15 Loss: 5.2617 +[2025-09-06 01:07:34] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:07:34] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:07:34] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:07:34] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:07:34] [Rank 0] Group 2 FTA: 0.4000 +[2025-09-06 01:07:34] [Rank 0] Group 2 FTA: 0.4000 +[2025-09-06 01:07:34] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:07:34] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:07:34] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:07:34] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:07:34] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-06 01:07:34] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-06 01:07:34] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:07:34] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:07:34] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-06 01:07:34] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-06 01:07:34] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:07:34] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:07:34] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:07:34] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:07:34] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:07:34] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:07:34] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-06 01:07:34] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-06 01:07:34] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-06 01:07:34] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-06 01:07:34] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-06 01:07:34] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-06 01:07:34] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-06 01:07:34] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-06 01:07:34] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 01:07:34] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 01:07:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:07:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:07:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:07:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:07:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:07:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:07:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:07:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:07:35] [Rank 0] step:8001/10000 train_time:332965ms step_avg:41.62ms +[2025-09-06 01:07:35] [Rank 0] step:8001/10000 train_time:332965ms step_avg:41.62ms +[2025-09-06 01:07:37] [Rank 0] step:8021/10000 train_time:334274ms step_avg:41.67ms +[2025-09-06 01:07:37] [Rank 0] step:8021/10000 train_time:334274ms step_avg:41.67ms +[2025-09-06 01:07:37] [Rank 0] step:8041/10000 train_time:335011ms step_avg:41.66ms +[2025-09-06 01:07:37] [Rank 0] step:8041/10000 train_time:335011ms step_avg:41.66ms +[2025-09-06 01:07:38] [Rank 0] step:8061/10000 train_time:335749ms step_avg:41.65ms +[2025-09-06 01:07:38] [Rank 0] step:8061/10000 train_time:335749ms step_avg:41.65ms +[2025-09-06 01:07:39] [Rank 0] step:8081/10000 train_time:336486ms step_avg:41.64ms +[2025-09-06 01:07:39] [Rank 0] step:8081/10000 train_time:336486ms step_avg:41.64ms +[2025-09-06 01:07:39] [Rank 0] step:8101/10000 train_time:337224ms step_avg:41.63ms +[2025-09-06 01:07:39] [Rank 0] step:8101/10000 train_time:337224ms step_avg:41.63ms +[2025-09-06 01:07:40] [Rank 0] step:8121/10000 train_time:337961ms step_avg:41.62ms +[2025-09-06 
01:07:40] [Rank 0] step:8121/10000 train_time:337961ms step_avg:41.62ms +[2025-09-06 01:07:41] [Rank 0] step:8141/10000 train_time:338699ms step_avg:41.60ms +[2025-09-06 01:07:41] [Rank 0] step:8141/10000 train_time:338699ms step_avg:41.60ms +[2025-09-06 01:07:42] [Rank 0] step:8161/10000 train_time:339437ms step_avg:41.59ms +[2025-09-06 01:07:42] [Rank 0] step:8161/10000 train_time:339437ms step_avg:41.59ms +[2025-09-06 01:07:42] [Rank 0] step:8181/10000 train_time:340176ms step_avg:41.58ms +[2025-09-06 01:07:42] [Rank 0] step:8181/10000 train_time:340176ms step_avg:41.58ms +[2025-09-06 01:07:43] [Rank 0] step:8201/10000 train_time:340913ms step_avg:41.57ms +[2025-09-06 01:07:43] [Rank 0] step:8201/10000 train_time:340913ms step_avg:41.57ms +[2025-09-06 01:07:44] [Rank 0] step:8221/10000 train_time:341649ms step_avg:41.56ms +[2025-09-06 01:07:44] [Rank 0] step:8221/10000 train_time:341649ms step_avg:41.56ms +[2025-09-06 01:07:45] [Rank 0] step:8241/10000 train_time:342386ms step_avg:41.55ms +[2025-09-06 01:07:45] [Rank 0] step:8241/10000 train_time:342386ms step_avg:41.55ms +[2025-09-06 01:07:45] [Rank 0] step:8261/10000 train_time:343122ms step_avg:41.54ms +[2025-09-06 01:07:45] [Rank 0] step:8261/10000 train_time:343122ms step_avg:41.54ms +[2025-09-06 01:07:46] [Rank 0] step:8281/10000 train_time:343860ms step_avg:41.52ms +[2025-09-06 01:07:46] [Rank 0] step:8281/10000 train_time:343860ms step_avg:41.52ms +[2025-09-06 01:07:47] [Rank 0] step:8301/10000 train_time:344597ms step_avg:41.51ms +[2025-09-06 01:07:47] [Rank 0] step:8301/10000 train_time:344597ms step_avg:41.51ms +[2025-09-06 01:07:48] [Rank 0] step:8321/10000 train_time:345335ms step_avg:41.50ms +[2025-09-06 01:07:48] [Rank 0] step:8321/10000 train_time:345335ms step_avg:41.50ms +[2025-09-06 01:07:48] [Rank 0] step:8341/10000 train_time:346072ms step_avg:41.49ms +[2025-09-06 01:07:48] [Rank 0] step:8341/10000 train_time:346072ms step_avg:41.49ms +[2025-09-06 01:07:49] [Rank 0] step:8361/10000 train_time:346809ms step_avg:41.48ms +[2025-09-06 01:07:49] [Rank 0] step:8361/10000 train_time:346809ms step_avg:41.48ms +[2025-09-06 01:07:50] [Rank 0] step:8381/10000 train_time:347547ms step_avg:41.47ms +[2025-09-06 01:07:50] [Rank 0] step:8381/10000 train_time:347547ms step_avg:41.47ms +[2025-09-06 01:07:51] [Rank 0] step:8401/10000 train_time:348285ms step_avg:41.46ms +[2025-09-06 01:07:51] [Rank 0] step:8401/10000 train_time:348285ms step_avg:41.46ms +[2025-09-06 01:07:51] [Rank 0] step:8421/10000 train_time:349023ms step_avg:41.45ms +[2025-09-06 01:07:51] [Rank 0] step:8421/10000 train_time:349023ms step_avg:41.45ms +[2025-09-06 01:07:52] [Rank 0] step:8441/10000 train_time:349761ms step_avg:41.44ms +[2025-09-06 01:07:52] [Rank 0] step:8441/10000 train_time:349761ms step_avg:41.44ms +[2025-09-06 01:07:53] [Rank 0] step:8461/10000 train_time:350498ms step_avg:41.43ms +[2025-09-06 01:07:53] [Rank 0] step:8461/10000 train_time:350498ms step_avg:41.43ms +[2025-09-06 01:07:53] [Rank 0] step:8481/10000 train_time:351238ms step_avg:41.41ms +[2025-09-06 01:07:53] [Rank 0] step:8481/10000 train_time:351238ms step_avg:41.41ms +[2025-09-06 01:07:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:07:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
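The warning repeated above at every 500-step validation pass comes down to simple arithmetic: 491,520 validation tokens do not divide evenly into 65,536-token batches, so a loop that consumes only full batches skips the final half batch. A minimal sketch of that arithmetic, assuming the validation loop takes floor(val_tokens / val_batch_size) full batches (the variable names mirror the values printed in the warning, not identifiers from the script):

val_tokens = 491_520        # "val_tokens" from the hyperparameters
val_batch_size = 65_536     # "val_batch_size" from the warning
full_batches, leftover = divmod(val_tokens, val_batch_size)
print(full_batches)         # 7 full validation batches
print(leftover)             # 32768 tokens (half a batch) go unevaluated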
+[2025-09-06 01:07:55] [Rank 0] PRINT: step:8500/10000 train_loss:2.2077 val_loss:2.1916 train_time:352057ms step_avg:41.42ms +[2025-09-06 01:07:55] [Rank 0] PRINT: step:8500/10000 train_loss:2.2077 val_loss:2.1916 train_time:352057ms step_avg:41.42ms +[2025-09-06 01:07:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:07:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:07:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:07:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:09:17] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:09:17] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:09:17] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:09:17] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:09:17] [Rank 0] Total Loss: 4.5883 +[2025-09-06 01:09:17] [Rank 0] Total Loss: 4.5883 +[2025-09-06 01:09:17] [Rank 0] Total FTA (Unweighted): 0.3031 +[2025-09-06 01:09:17] [Rank 0] Total FTA (Unweighted): 0.3031 +[2025-09-06 01:09:17] [Rank 0] Total FTA (Weighted): 0.3031 +[2025-09-06 01:09:17] [Rank 0] Total FTA (Weighted): 0.3031 +[2025-09-06 01:09:17] [Rank 0] Group 0 Loss: 3.2559 +[2025-09-06 01:09:17] [Rank 0] Group 0 Loss: 3.2559 +[2025-09-06 01:09:17] [Rank 0] Group 1 Loss: 3.2406 +[2025-09-06 01:09:17] [Rank 0] Group 1 Loss: 3.2406 +[2025-09-06 01:09:17] [Rank 0] Group 2 Loss: 3.2725 +[2025-09-06 01:09:17] [Rank 0] Group 2 Loss: 3.2725 +[2025-09-06 01:09:17] [Rank 0] Group 3 Loss: 3.5710 +[2025-09-06 01:09:17] [Rank 0] Group 3 Loss: 3.5710 +[2025-09-06 01:09:17] [Rank 0] Group 4 Loss: 3.9249 +[2025-09-06 01:09:17] [Rank 0] Group 4 Loss: 3.9249 +[2025-09-06 01:09:17] [Rank 0] Group 5 Loss: 4.4430 +[2025-09-06 01:09:17] [Rank 0] Group 5 Loss: 4.4430 +[2025-09-06 01:09:17] [Rank 0] Group 6 Loss: 4.6746 +[2025-09-06 01:09:17] [Rank 0] Group 6 Loss: 4.6746 +[2025-09-06 01:09:17] [Rank 0] Group 7 Loss: 4.8240 +[2025-09-06 01:09:17] [Rank 0] Group 7 Loss: 4.8240 +[2025-09-06 01:09:17] [Rank 0] Group 8 Loss: 5.1058 +[2025-09-06 01:09:17] [Rank 0] Group 8 Loss: 5.1058 +[2025-09-06 01:09:17] [Rank 0] Group 9 Loss: 5.2693 +[2025-09-06 01:09:17] [Rank 0] Group 9 Loss: 5.2693 +[2025-09-06 01:09:17] [Rank 0] Group 10 Loss: 5.3126 +[2025-09-06 01:09:17] [Rank 0] Group 10 Loss: 5.3126 +[2025-09-06 01:09:17] [Rank 0] Group 11 Loss: 5.3239 +[2025-09-06 01:09:17] [Rank 0] Group 11 Loss: 5.3239 +[2025-09-06 01:09:17] [Rank 0] Group 12 Loss: 5.2814 +[2025-09-06 01:09:17] [Rank 0] Group 12 Loss: 5.2814 +[2025-09-06 01:09:17] [Rank 0] Group 13 Loss: 5.2967 +[2025-09-06 01:09:17] [Rank 0] Group 13 Loss: 5.2967 +[2025-09-06 01:09:17] [Rank 0] Group 14 Loss: 5.3278 +[2025-09-06 01:09:17] [Rank 0] Group 14 Loss: 5.3278 +[2025-09-06 01:09:17] [Rank 0] Group 15 Loss: 5.2888 +[2025-09-06 01:09:17] [Rank 0] Group 15 Loss: 5.2888 +[2025-09-06 01:09:17] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:09:17] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:09:17] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:09:17] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:09:17] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-06 01:09:17] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-06 01:09:17] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:09:17] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:09:17] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:09:17] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:09:17] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-06 01:09:17] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-06 01:09:17] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:09:17] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:09:17] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-06 01:09:17] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-06 01:09:17] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:09:17] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:09:17] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:09:17] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:09:17] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:09:17] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:09:17] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-06 01:09:17] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-06 01:09:17] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-06 01:09:17] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-06 01:09:17] [Rank 0] Group 13 FTA: 0.2200 +[2025-09-06 01:09:17] [Rank 0] Group 13 FTA: 0.2200 +[2025-09-06 01:09:17] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-06 01:09:17] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-06 01:09:17] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-06 01:09:17] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-06 01:09:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:09:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:09:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:09:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:09:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:09:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:09:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:09:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:09:19] [Rank 0] step:8501/10000 train_time:352067ms step_avg:41.41ms +[2025-09-06 01:09:19] [Rank 0] step:8501/10000 train_time:352067ms step_avg:41.41ms +[2025-09-06 01:09:19] [Rank 0] step:8521/10000 train_time:352737ms step_avg:41.40ms +[2025-09-06 01:09:19] [Rank 0] step:8521/10000 train_time:352737ms step_avg:41.40ms +[2025-09-06 01:09:20] [Rank 0] step:8541/10000 train_time:353475ms step_avg:41.39ms +[2025-09-06 01:09:20] [Rank 0] step:8541/10000 train_time:353475ms step_avg:41.39ms +[2025-09-06 01:09:21] [Rank 0] step:8561/10000 train_time:354213ms step_avg:41.38ms +[2025-09-06 01:09:21] [Rank 0] step:8561/10000 train_time:354213ms step_avg:41.38ms +[2025-09-06 01:09:21] [Rank 0] step:8581/10000 train_time:354952ms step_avg:41.36ms +[2025-09-06 01:09:21] [Rank 0] step:8581/10000 train_time:354952ms step_avg:41.36ms +[2025-09-06 01:09:22] [Rank 0] step:8601/10000 train_time:355692ms step_avg:41.35ms +[2025-09-06 01:09:22] [Rank 0] step:8601/10000 train_time:355692ms step_avg:41.35ms +[2025-09-06 01:09:23] [Rank 0] step:8621/10000 train_time:356429ms step_avg:41.34ms +[2025-09-06 
01:09:23] [Rank 0] step:8621/10000 train_time:356429ms step_avg:41.34ms +[2025-09-06 01:09:24] [Rank 0] step:8641/10000 train_time:357166ms step_avg:41.33ms +[2025-09-06 01:09:24] [Rank 0] step:8641/10000 train_time:357166ms step_avg:41.33ms +[2025-09-06 01:09:24] [Rank 0] step:8661/10000 train_time:357905ms step_avg:41.32ms +[2025-09-06 01:09:24] [Rank 0] step:8661/10000 train_time:357905ms step_avg:41.32ms +[2025-09-06 01:09:25] [Rank 0] step:8681/10000 train_time:358644ms step_avg:41.31ms +[2025-09-06 01:09:25] [Rank 0] step:8681/10000 train_time:358644ms step_avg:41.31ms +[2025-09-06 01:09:26] [Rank 0] step:8701/10000 train_time:359381ms step_avg:41.30ms +[2025-09-06 01:09:26] [Rank 0] step:8701/10000 train_time:359381ms step_avg:41.30ms +[2025-09-06 01:09:27] [Rank 0] step:8721/10000 train_time:360118ms step_avg:41.29ms +[2025-09-06 01:09:27] [Rank 0] step:8721/10000 train_time:360118ms step_avg:41.29ms +[2025-09-06 01:09:27] [Rank 0] step:8741/10000 train_time:360857ms step_avg:41.28ms +[2025-09-06 01:09:27] [Rank 0] step:8741/10000 train_time:360857ms step_avg:41.28ms +[2025-09-06 01:09:28] [Rank 0] step:8761/10000 train_time:361596ms step_avg:41.27ms +[2025-09-06 01:09:28] [Rank 0] step:8761/10000 train_time:361596ms step_avg:41.27ms +[2025-09-06 01:09:29] [Rank 0] step:8781/10000 train_time:362333ms step_avg:41.26ms +[2025-09-06 01:09:29] [Rank 0] step:8781/10000 train_time:362333ms step_avg:41.26ms +[2025-09-06 01:09:30] [Rank 0] step:8801/10000 train_time:363197ms step_avg:41.27ms +[2025-09-06 01:09:30] [Rank 0] step:8801/10000 train_time:363197ms step_avg:41.27ms +[2025-09-06 01:09:30] [Rank 0] step:8821/10000 train_time:363936ms step_avg:41.26ms +[2025-09-06 01:09:30] [Rank 0] step:8821/10000 train_time:363936ms step_avg:41.26ms +[2025-09-06 01:09:32] [Rank 0] step:8841/10000 train_time:365011ms step_avg:41.29ms +[2025-09-06 01:09:32] [Rank 0] step:8841/10000 train_time:365011ms step_avg:41.29ms +[2025-09-06 01:09:32] [Rank 0] step:8861/10000 train_time:365770ms step_avg:41.28ms +[2025-09-06 01:09:32] [Rank 0] step:8861/10000 train_time:365770ms step_avg:41.28ms +[2025-09-06 01:09:33] [Rank 0] step:8881/10000 train_time:366507ms step_avg:41.27ms +[2025-09-06 01:09:33] [Rank 0] step:8881/10000 train_time:366507ms step_avg:41.27ms +[2025-09-06 01:09:34] [Rank 0] step:8901/10000 train_time:367244ms step_avg:41.26ms +[2025-09-06 01:09:34] [Rank 0] step:8901/10000 train_time:367244ms step_avg:41.26ms +[2025-09-06 01:09:35] [Rank 0] step:8921/10000 train_time:367982ms step_avg:41.25ms +[2025-09-06 01:09:35] [Rank 0] step:8921/10000 train_time:367982ms step_avg:41.25ms +[2025-09-06 01:09:35] [Rank 0] step:8941/10000 train_time:368720ms step_avg:41.24ms +[2025-09-06 01:09:35] [Rank 0] step:8941/10000 train_time:368720ms step_avg:41.24ms +[2025-09-06 01:09:36] [Rank 0] step:8961/10000 train_time:369458ms step_avg:41.23ms +[2025-09-06 01:09:36] [Rank 0] step:8961/10000 train_time:369458ms step_avg:41.23ms +[2025-09-06 01:09:37] [Rank 0] step:8981/10000 train_time:370197ms step_avg:41.22ms +[2025-09-06 01:09:37] [Rank 0] step:8981/10000 train_time:370197ms step_avg:41.22ms +[2025-09-06 01:09:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:09:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
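The detailed-evaluation blocks above report a per-group FTA plus two totals. The excerpt defines neither FTA nor the aggregation code; the natural reading is first-token accuracy, with the unweighted total being the mean of the 16 per-group values and the weighted total a sample-count-weighted mean. With the fixed eval set of 1600 samples split 100 per group (per_group_k = 100), the two should nearly coincide, which matches the log (both 0.3031 at step 8500; at step 8000 they differ in the last digit, 0.3062 vs 0.3063, so the weighting is evidently not exactly uniform). A sketch of the aggregation under these assumptions:

group_fta = [1.00, 1.00, 0.31, 0.17, 0.25, 0.24, 0.29, 0.13,
             0.22, 0.14, 0.20, 0.18, 0.18, 0.22, 0.20, 0.12]   # step 8500 values
group_sizes = [100] * 16                                        # per_group_k = 100
unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
print(f"{unweighted:.4f} {weighted:.4f}")                       # 0.3031 0.3031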
+[2025-09-06 01:09:38] [Rank 0] PRINT: step:9000/10000 train_loss:2.1938 val_loss:2.1798 train_time:371016ms step_avg:41.22ms +[2025-09-06 01:09:38] [Rank 0] PRINT: step:9000/10000 train_loss:2.1938 val_loss:2.1798 train_time:371016ms step_avg:41.22ms +[2025-09-06 01:09:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:09:38] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:09:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:09:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:10:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:10:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:10:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:10:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:10:59] [Rank 0] Total Loss: 4.5464 +[2025-09-06 01:10:59] [Rank 0] Total Loss: 4.5464 +[2025-09-06 01:10:59] [Rank 0] Total FTA (Unweighted): 0.3031 +[2025-09-06 01:10:59] [Rank 0] Total FTA (Unweighted): 0.3031 +[2025-09-06 01:10:59] [Rank 0] Total FTA (Weighted): 0.3031 +[2025-09-06 01:10:59] [Rank 0] Total FTA (Weighted): 0.3031 +[2025-09-06 01:10:59] [Rank 0] Group 0 Loss: 3.2455 +[2025-09-06 01:10:59] [Rank 0] Group 0 Loss: 3.2455 +[2025-09-06 01:10:59] [Rank 0] Group 1 Loss: 3.0927 +[2025-09-06 01:10:59] [Rank 0] Group 1 Loss: 3.0927 +[2025-09-06 01:10:59] [Rank 0] Group 2 Loss: 3.2069 +[2025-09-06 01:10:59] [Rank 0] Group 2 Loss: 3.2069 +[2025-09-06 01:10:59] [Rank 0] Group 3 Loss: 3.5411 +[2025-09-06 01:10:59] [Rank 0] Group 3 Loss: 3.5411 +[2025-09-06 01:10:59] [Rank 0] Group 4 Loss: 3.8749 +[2025-09-06 01:10:59] [Rank 0] Group 4 Loss: 3.8749 +[2025-09-06 01:10:59] [Rank 0] Group 5 Loss: 4.4003 +[2025-09-06 01:10:59] [Rank 0] Group 5 Loss: 4.4003 +[2025-09-06 01:10:59] [Rank 0] Group 6 Loss: 4.6415 +[2025-09-06 01:10:59] [Rank 0] Group 6 Loss: 4.6415 +[2025-09-06 01:10:59] [Rank 0] Group 7 Loss: 4.8036 +[2025-09-06 01:10:59] [Rank 0] Group 7 Loss: 4.8036 +[2025-09-06 01:10:59] [Rank 0] Group 8 Loss: 5.0682 +[2025-09-06 01:10:59] [Rank 0] Group 8 Loss: 5.0682 +[2025-09-06 01:10:59] [Rank 0] Group 9 Loss: 5.2429 +[2025-09-06 01:10:59] [Rank 0] Group 9 Loss: 5.2429 +[2025-09-06 01:10:59] [Rank 0] Group 10 Loss: 5.2788 +[2025-09-06 01:10:59] [Rank 0] Group 10 Loss: 5.2788 +[2025-09-06 01:10:59] [Rank 0] Group 11 Loss: 5.2854 +[2025-09-06 01:10:59] [Rank 0] Group 11 Loss: 5.2854 +[2025-09-06 01:10:59] [Rank 0] Group 12 Loss: 5.2573 +[2025-09-06 01:10:59] [Rank 0] Group 12 Loss: 5.2573 +[2025-09-06 01:10:59] [Rank 0] Group 13 Loss: 5.2649 +[2025-09-06 01:10:59] [Rank 0] Group 13 Loss: 5.2649 +[2025-09-06 01:10:59] [Rank 0] Group 14 Loss: 5.2901 +[2025-09-06 01:10:59] [Rank 0] Group 14 Loss: 5.2901 +[2025-09-06 01:10:59] [Rank 0] Group 15 Loss: 5.2483 +[2025-09-06 01:10:59] [Rank 0] Group 15 Loss: 5.2483 +[2025-09-06 01:10:59] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:10:59] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:10:59] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:10:59] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:10:59] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-06 01:10:59] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-06 01:10:59] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:10:59] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:10:59] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:10:59] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:10:59] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-06 01:10:59] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-06 01:10:59] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:10:59] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:10:59] [Rank 0] Group 7 FTA: 0.1400 +[2025-09-06 01:10:59] [Rank 0] Group 7 FTA: 0.1400 +[2025-09-06 01:10:59] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:10:59] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:10:59] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:10:59] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:10:59] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:10:59] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:10:59] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-06 01:10:59] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-06 01:10:59] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-06 01:10:59] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-06 01:10:59] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-06 01:10:59] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-06 01:10:59] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-06 01:10:59] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-06 01:10:59] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 01:10:59] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 01:10:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:10:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:11:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:11:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:11:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:11:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:11:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:11:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:11:00] [Rank 0] step:9001/10000 train_time:371025ms step_avg:41.22ms +[2025-09-06 01:11:00] [Rank 0] step:9001/10000 train_time:371025ms step_avg:41.22ms +[2025-09-06 01:11:01] [Rank 0] step:9021/10000 train_time:371707ms step_avg:41.20ms +[2025-09-06 01:11:01] [Rank 0] step:9021/10000 train_time:371707ms step_avg:41.20ms +[2025-09-06 01:11:02] [Rank 0] step:9041/10000 train_time:372446ms step_avg:41.20ms +[2025-09-06 01:11:02] [Rank 0] step:9041/10000 train_time:372446ms step_avg:41.20ms +[2025-09-06 01:11:03] [Rank 0] step:9061/10000 train_time:373184ms step_avg:41.19ms +[2025-09-06 01:11:03] [Rank 0] step:9061/10000 train_time:373184ms step_avg:41.19ms +[2025-09-06 01:11:03] [Rank 0] step:9081/10000 train_time:373926ms step_avg:41.18ms +[2025-09-06 01:11:03] [Rank 0] step:9081/10000 train_time:373926ms step_avg:41.18ms +[2025-09-06 01:11:04] [Rank 0] step:9101/10000 train_time:374666ms step_avg:41.17ms +[2025-09-06 01:11:04] [Rank 0] step:9101/10000 train_time:374666ms step_avg:41.17ms +[2025-09-06 01:11:05] [Rank 0] step:9121/10000 train_time:375404ms step_avg:41.16ms +[2025-09-06 
01:11:05] [Rank 0] step:9121/10000 train_time:375404ms step_avg:41.16ms +[2025-09-06 01:11:06] [Rank 0] step:9141/10000 train_time:376144ms step_avg:41.15ms +[2025-09-06 01:11:06] [Rank 0] step:9141/10000 train_time:376144ms step_avg:41.15ms +[2025-09-06 01:11:06] [Rank 0] step:9161/10000 train_time:376882ms step_avg:41.14ms +[2025-09-06 01:11:06] [Rank 0] step:9161/10000 train_time:376882ms step_avg:41.14ms +[2025-09-06 01:11:07] [Rank 0] step:9181/10000 train_time:377620ms step_avg:41.13ms +[2025-09-06 01:11:07] [Rank 0] step:9181/10000 train_time:377620ms step_avg:41.13ms +[2025-09-06 01:11:08] [Rank 0] step:9201/10000 train_time:378358ms step_avg:41.12ms +[2025-09-06 01:11:08] [Rank 0] step:9201/10000 train_time:378358ms step_avg:41.12ms +[2025-09-06 01:11:09] [Rank 0] step:9221/10000 train_time:379095ms step_avg:41.11ms +[2025-09-06 01:11:09] [Rank 0] step:9221/10000 train_time:379095ms step_avg:41.11ms +[2025-09-06 01:11:09] [Rank 0] step:9241/10000 train_time:379834ms step_avg:41.10ms +[2025-09-06 01:11:09] [Rank 0] step:9241/10000 train_time:379834ms step_avg:41.10ms +[2025-09-06 01:11:10] [Rank 0] step:9261/10000 train_time:380573ms step_avg:41.09ms +[2025-09-06 01:11:10] [Rank 0] step:9261/10000 train_time:380573ms step_avg:41.09ms +[2025-09-06 01:11:11] [Rank 0] step:9281/10000 train_time:381313ms step_avg:41.09ms +[2025-09-06 01:11:11] [Rank 0] step:9281/10000 train_time:381313ms step_avg:41.09ms +[2025-09-06 01:11:11] [Rank 0] step:9301/10000 train_time:382051ms step_avg:41.08ms +[2025-09-06 01:11:11] [Rank 0] step:9301/10000 train_time:382051ms step_avg:41.08ms +[2025-09-06 01:11:12] [Rank 0] step:9321/10000 train_time:382789ms step_avg:41.07ms +[2025-09-06 01:11:12] [Rank 0] step:9321/10000 train_time:382789ms step_avg:41.07ms +[2025-09-06 01:11:13] [Rank 0] step:9341/10000 train_time:383528ms step_avg:41.06ms +[2025-09-06 01:11:13] [Rank 0] step:9341/10000 train_time:383528ms step_avg:41.06ms +[2025-09-06 01:11:14] [Rank 0] step:9361/10000 train_time:384265ms step_avg:41.05ms +[2025-09-06 01:11:14] [Rank 0] step:9361/10000 train_time:384265ms step_avg:41.05ms +[2025-09-06 01:11:14] [Rank 0] step:9381/10000 train_time:385003ms step_avg:41.04ms +[2025-09-06 01:11:14] [Rank 0] step:9381/10000 train_time:385003ms step_avg:41.04ms +[2025-09-06 01:11:15] [Rank 0] step:9401/10000 train_time:385741ms step_avg:41.03ms +[2025-09-06 01:11:15] [Rank 0] step:9401/10000 train_time:385741ms step_avg:41.03ms +[2025-09-06 01:11:16] [Rank 0] step:9421/10000 train_time:386480ms step_avg:41.02ms +[2025-09-06 01:11:16] [Rank 0] step:9421/10000 train_time:386480ms step_avg:41.02ms +[2025-09-06 01:11:17] [Rank 0] step:9441/10000 train_time:387218ms step_avg:41.01ms +[2025-09-06 01:11:17] [Rank 0] step:9441/10000 train_time:387218ms step_avg:41.01ms +[2025-09-06 01:11:17] [Rank 0] step:9461/10000 train_time:387956ms step_avg:41.01ms +[2025-09-06 01:11:17] [Rank 0] step:9461/10000 train_time:387956ms step_avg:41.01ms +[2025-09-06 01:11:18] [Rank 0] step:9481/10000 train_time:388694ms step_avg:41.00ms +[2025-09-06 01:11:18] [Rank 0] step:9481/10000 train_time:388694ms step_avg:41.00ms +[2025-09-06 01:11:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:11:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
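The step_avg printed on every line is just cumulative training time divided by completed steps, and it excludes evaluation and plotting time: train_time advances only about 10 ms across an entire detailed-evaluation block (332955 ms at step 8000 vs 332965 ms at step 8001). A one-line check against the step-9500 entry that follows (a sketch; the actual bookkeeping lives elsewhere in the script):

train_time_ms = 389_513                              # train_time at step 9500
print(f"step_avg:{train_time_ms / 9_500:.2f}ms")     # step_avg:41.00ms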
+[2025-09-06 01:11:19] [Rank 0] PRINT: step:9500/10000 train_loss:2.1824 val_loss:2.1702 train_time:389513ms step_avg:41.00ms +[2025-09-06 01:11:19] [Rank 0] PRINT: step:9500/10000 train_loss:2.1824 val_loss:2.1702 train_time:389513ms step_avg:41.00ms +[2025-09-06 01:11:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:11:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:11:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:11:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:12:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:12:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:12:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:12:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:12:40] [Rank 0] Total Loss: 4.5275 +[2025-09-06 01:12:40] [Rank 0] Total Loss: 4.5275 +[2025-09-06 01:12:40] [Rank 0] Total FTA (Unweighted): 0.3044 +[2025-09-06 01:12:40] [Rank 0] Total FTA (Unweighted): 0.3044 +[2025-09-06 01:12:40] [Rank 0] Total FTA (Weighted): 0.3044 +[2025-09-06 01:12:40] [Rank 0] Total FTA (Weighted): 0.3044 +[2025-09-06 01:12:40] [Rank 0] Group 0 Loss: 3.2378 +[2025-09-06 01:12:40] [Rank 0] Group 0 Loss: 3.2378 +[2025-09-06 01:12:40] [Rank 0] Group 1 Loss: 3.0722 +[2025-09-06 01:12:40] [Rank 0] Group 1 Loss: 3.0722 +[2025-09-06 01:12:40] [Rank 0] Group 2 Loss: 3.2073 +[2025-09-06 01:12:40] [Rank 0] Group 2 Loss: 3.2073 +[2025-09-06 01:12:40] [Rank 0] Group 3 Loss: 3.5253 +[2025-09-06 01:12:40] [Rank 0] Group 3 Loss: 3.5253 +[2025-09-06 01:12:40] [Rank 0] Group 4 Loss: 3.8598 +[2025-09-06 01:12:40] [Rank 0] Group 4 Loss: 3.8598 +[2025-09-06 01:12:40] [Rank 0] Group 5 Loss: 4.3623 +[2025-09-06 01:12:40] [Rank 0] Group 5 Loss: 4.3623 +[2025-09-06 01:12:40] [Rank 0] Group 6 Loss: 4.6214 +[2025-09-06 01:12:40] [Rank 0] Group 6 Loss: 4.6214 +[2025-09-06 01:12:40] [Rank 0] Group 7 Loss: 4.7802 +[2025-09-06 01:12:40] [Rank 0] Group 7 Loss: 4.7802 +[2025-09-06 01:12:40] [Rank 0] Group 8 Loss: 5.0473 +[2025-09-06 01:12:40] [Rank 0] Group 8 Loss: 5.0473 +[2025-09-06 01:12:40] [Rank 0] Group 9 Loss: 5.2212 +[2025-09-06 01:12:40] [Rank 0] Group 9 Loss: 5.2212 +[2025-09-06 01:12:40] [Rank 0] Group 10 Loss: 5.2705 +[2025-09-06 01:12:40] [Rank 0] Group 10 Loss: 5.2705 +[2025-09-06 01:12:40] [Rank 0] Group 11 Loss: 5.2687 +[2025-09-06 01:12:40] [Rank 0] Group 11 Loss: 5.2687 +[2025-09-06 01:12:40] [Rank 0] Group 12 Loss: 5.2259 +[2025-09-06 01:12:40] [Rank 0] Group 12 Loss: 5.2259 +[2025-09-06 01:12:40] [Rank 0] Group 13 Loss: 5.2418 +[2025-09-06 01:12:40] [Rank 0] Group 13 Loss: 5.2418 +[2025-09-06 01:12:40] [Rank 0] Group 14 Loss: 5.2736 +[2025-09-06 01:12:40] [Rank 0] Group 14 Loss: 5.2736 +[2025-09-06 01:12:40] [Rank 0] Group 15 Loss: 5.2254 +[2025-09-06 01:12:40] [Rank 0] Group 15 Loss: 5.2254 +[2025-09-06 01:12:40] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:12:40] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:12:40] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:12:40] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:12:40] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-06 01:12:40] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-06 01:12:40] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:12:40] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:12:40] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:12:40] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:12:40] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-06 01:12:40] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-06 01:12:40] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:12:40] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:12:40] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-06 01:12:40] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-06 01:12:40] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:12:40] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:12:40] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:12:40] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:12:40] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:12:40] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:12:40] [Rank 0] Group 11 FTA: 0.1900 +[2025-09-06 01:12:40] [Rank 0] Group 11 FTA: 0.1900 +[2025-09-06 01:12:40] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-06 01:12:40] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-06 01:12:40] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-06 01:12:40] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-06 01:12:40] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-06 01:12:40] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-06 01:12:40] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-06 01:12:40] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-06 01:12:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:12:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:12:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:12:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:12:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:12:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:12:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:12:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:12:42] [Rank 0] step:9501/10000 train_time:389522ms step_avg:41.00ms +[2025-09-06 01:12:42] [Rank 0] step:9501/10000 train_time:389522ms step_avg:41.00ms +[2025-09-06 01:12:43] [Rank 0] step:9521/10000 train_time:390187ms step_avg:40.98ms +[2025-09-06 01:12:43] [Rank 0] step:9521/10000 train_time:390187ms step_avg:40.98ms +[2025-09-06 01:12:43] [Rank 0] step:9541/10000 train_time:390925ms step_avg:40.97ms +[2025-09-06 01:12:43] [Rank 0] step:9541/10000 train_time:390925ms step_avg:40.97ms +[2025-09-06 01:12:44] [Rank 0] step:9561/10000 train_time:391663ms step_avg:40.96ms +[2025-09-06 01:12:44] [Rank 0] step:9561/10000 train_time:391663ms step_avg:40.96ms +[2025-09-06 01:12:45] [Rank 0] step:9581/10000 train_time:392402ms step_avg:40.96ms +[2025-09-06 01:12:45] [Rank 0] step:9581/10000 train_time:392402ms step_avg:40.96ms +[2025-09-06 01:12:45] [Rank 0] step:9601/10000 train_time:393140ms step_avg:40.95ms +[2025-09-06 01:12:45] [Rank 0] step:9601/10000 train_time:393140ms step_avg:40.95ms +[2025-09-06 01:12:46] [Rank 0] step:9621/10000 train_time:393878ms step_avg:40.94ms +[2025-09-06 
01:12:46] [Rank 0] step:9621/10000 train_time:393878ms step_avg:40.94ms +[2025-09-06 01:12:47] [Rank 0] step:9641/10000 train_time:394616ms step_avg:40.93ms +[2025-09-06 01:12:47] [Rank 0] step:9641/10000 train_time:394616ms step_avg:40.93ms +[2025-09-06 01:12:48] [Rank 0] step:9661/10000 train_time:395628ms step_avg:40.95ms +[2025-09-06 01:12:48] [Rank 0] step:9661/10000 train_time:395628ms step_avg:40.95ms +[2025-09-06 01:12:49] [Rank 0] step:9681/10000 train_time:396366ms step_avg:40.94ms +[2025-09-06 01:12:49] [Rank 0] step:9681/10000 train_time:396366ms step_avg:40.94ms +[2025-09-06 01:12:49] [Rank 0] step:9701/10000 train_time:397104ms step_avg:40.93ms +[2025-09-06 01:12:49] [Rank 0] step:9701/10000 train_time:397104ms step_avg:40.93ms +[2025-09-06 01:12:50] [Rank 0] step:9721/10000 train_time:397842ms step_avg:40.93ms +[2025-09-06 01:12:50] [Rank 0] step:9721/10000 train_time:397842ms step_avg:40.93ms +[2025-09-06 01:12:51] [Rank 0] step:9741/10000 train_time:398580ms step_avg:40.92ms +[2025-09-06 01:12:51] [Rank 0] step:9741/10000 train_time:398580ms step_avg:40.92ms +[2025-09-06 01:12:52] [Rank 0] step:9761/10000 train_time:399318ms step_avg:40.91ms +[2025-09-06 01:12:52] [Rank 0] step:9761/10000 train_time:399318ms step_avg:40.91ms +[2025-09-06 01:12:52] [Rank 0] step:9781/10000 train_time:400056ms step_avg:40.90ms +[2025-09-06 01:12:52] [Rank 0] step:9781/10000 train_time:400056ms step_avg:40.90ms +[2025-09-06 01:12:53] [Rank 0] step:9801/10000 train_time:400794ms step_avg:40.89ms +[2025-09-06 01:12:53] [Rank 0] step:9801/10000 train_time:400794ms step_avg:40.89ms +[2025-09-06 01:12:54] [Rank 0] step:9821/10000 train_time:401532ms step_avg:40.89ms +[2025-09-06 01:12:54] [Rank 0] step:9821/10000 train_time:401532ms step_avg:40.89ms +[2025-09-06 01:12:55] [Rank 0] step:9841/10000 train_time:402270ms step_avg:40.88ms +[2025-09-06 01:12:55] [Rank 0] step:9841/10000 train_time:402270ms step_avg:40.88ms +[2025-09-06 01:12:55] [Rank 0] step:9861/10000 train_time:403008ms step_avg:40.87ms +[2025-09-06 01:12:55] [Rank 0] step:9861/10000 train_time:403008ms step_avg:40.87ms +[2025-09-06 01:12:56] [Rank 0] step:9881/10000 train_time:403746ms step_avg:40.86ms +[2025-09-06 01:12:56] [Rank 0] step:9881/10000 train_time:403746ms step_avg:40.86ms +[2025-09-06 01:12:57] [Rank 0] step:9901/10000 train_time:404484ms step_avg:40.85ms +[2025-09-06 01:12:57] [Rank 0] step:9901/10000 train_time:404484ms step_avg:40.85ms +[2025-09-06 01:12:58] [Rank 0] step:9921/10000 train_time:405222ms step_avg:40.84ms +[2025-09-06 01:12:58] [Rank 0] step:9921/10000 train_time:405222ms step_avg:40.84ms +[2025-09-06 01:12:58] [Rank 0] step:9941/10000 train_time:405960ms step_avg:40.84ms +[2025-09-06 01:12:58] [Rank 0] step:9941/10000 train_time:405960ms step_avg:40.84ms +[2025-09-06 01:12:59] [Rank 0] step:9961/10000 train_time:406698ms step_avg:40.83ms +[2025-09-06 01:12:59] [Rank 0] step:9961/10000 train_time:406698ms step_avg:40.83ms +[2025-09-06 01:13:00] [Rank 0] step:9981/10000 train_time:407436ms step_avg:40.82ms +[2025-09-06 01:13:00] [Rank 0] step:9981/10000 train_time:407436ms step_avg:40.82ms +[2025-09-06 01:13:00] [Rank 0] step:10000/10000 train_time:408138ms step_avg:40.81ms +[2025-09-06 01:13:00] [Rank 0] step:10000/10000 train_time:408138ms step_avg:40.81ms +[2025-09-06 01:13:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 01:13:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:13:01] [Rank 0] PRINT: step:10000/10000 train_loss:2.1736 val_loss:2.1617 train_time:408262ms step_avg:40.83ms +[2025-09-06 01:13:01] [Rank 0] PRINT: step:10000/10000 train_loss:2.1736 val_loss:2.1617 train_time:408262ms step_avg:40.83ms +[2025-09-06 01:13:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:13:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:13:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:13:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:14:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:14:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:14:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:14:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:14:22] [Rank 0] Total Loss: 4.5238 +[2025-09-06 01:14:22] [Rank 0] Total Loss: 4.5238 +[2025-09-06 01:14:22] [Rank 0] Total FTA (Unweighted): 0.3106 +[2025-09-06 01:14:22] [Rank 0] Total FTA (Unweighted): 0.3106 +[2025-09-06 01:14:22] [Rank 0] Total FTA (Weighted): 0.3106 +[2025-09-06 01:14:22] [Rank 0] Total FTA (Weighted): 0.3106 +[2025-09-06 01:14:22] [Rank 0] Group 0 Loss: 3.1971 +[2025-09-06 01:14:22] [Rank 0] Group 0 Loss: 3.1971 +[2025-09-06 01:14:22] [Rank 0] Group 1 Loss: 3.0988 +[2025-09-06 01:14:22] [Rank 0] Group 1 Loss: 3.0988 +[2025-09-06 01:14:22] [Rank 0] Group 2 Loss: 3.2077 +[2025-09-06 01:14:22] [Rank 0] Group 2 Loss: 3.2077 +[2025-09-06 01:14:22] [Rank 0] Group 3 Loss: 3.5046 +[2025-09-06 01:14:22] [Rank 0] Group 3 Loss: 3.5046 +[2025-09-06 01:14:22] [Rank 0] Group 4 Loss: 3.8566 +[2025-09-06 01:14:22] [Rank 0] Group 4 Loss: 3.8566 +[2025-09-06 01:14:22] [Rank 0] Group 5 Loss: 4.3812 +[2025-09-06 01:14:22] [Rank 0] Group 5 Loss: 4.3812 +[2025-09-06 01:14:22] [Rank 0] Group 6 Loss: 4.6139 +[2025-09-06 01:14:22] [Rank 0] Group 6 Loss: 4.6139 +[2025-09-06 01:14:22] [Rank 0] Group 7 Loss: 4.7781 +[2025-09-06 01:14:22] [Rank 0] Group 7 Loss: 4.7781 +[2025-09-06 01:14:22] [Rank 0] Group 8 Loss: 5.0481 +[2025-09-06 01:14:22] [Rank 0] Group 8 Loss: 5.0481 +[2025-09-06 01:14:22] [Rank 0] Group 9 Loss: 5.2129 +[2025-09-06 01:14:22] [Rank 0] Group 9 Loss: 5.2129 +[2025-09-06 01:14:22] [Rank 0] Group 10 Loss: 5.2571 +[2025-09-06 01:14:22] [Rank 0] Group 10 Loss: 5.2571 +[2025-09-06 01:14:22] [Rank 0] Group 11 Loss: 5.2637 +[2025-09-06 01:14:22] [Rank 0] Group 11 Loss: 5.2637 +[2025-09-06 01:14:22] [Rank 0] Group 12 Loss: 5.2298 +[2025-09-06 01:14:22] [Rank 0] Group 12 Loss: 5.2298 +[2025-09-06 01:14:22] [Rank 0] Group 13 Loss: 5.2375 +[2025-09-06 01:14:22] [Rank 0] Group 13 Loss: 5.2375 +[2025-09-06 01:14:22] [Rank 0] Group 14 Loss: 5.2684 +[2025-09-06 01:14:22] [Rank 0] Group 14 Loss: 5.2684 +[2025-09-06 01:14:22] [Rank 0] Group 15 Loss: 5.2252 +[2025-09-06 01:14:22] [Rank 0] Group 15 Loss: 5.2252 +[2025-09-06 01:14:22] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:14:22] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-06 01:14:22] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:14:22] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-06 01:14:22] [Rank 0] Group 2 FTA: 0.3900 +[2025-09-06 01:14:22] [Rank 0] Group 2 FTA: 0.3900 +[2025-09-06 01:14:22] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:14:22] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-06 01:14:22] [Rank 0] Group 4 FTA: 0.2500 
+[2025-09-06 01:14:22] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-06 01:14:22] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-06 01:14:22] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-06 01:14:22] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:14:22] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-06 01:14:22] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-06 01:14:22] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-06 01:14:22] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:14:22] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-06 01:14:22] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:14:22] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-06 01:14:22] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:14:22] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-06 01:14:22] [Rank 0] Group 11 FTA: 0.1900 +[2025-09-06 01:14:22] [Rank 0] Group 11 FTA: 0.1900 +[2025-09-06 01:14:22] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-06 01:14:22] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-06 01:14:22] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-06 01:14:22] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-06 01:14:22] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-06 01:14:22] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-06 01:14:22] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-06 01:14:22] [Rank 0] Group 15 FTA: 0.1500 +[2025-09-06 01:14:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:14:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_loss_curves.png +[2025-09-06 01:14:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:14:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/per_class_acc_curves.png +[2025-09-06 01:14:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:14:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_loss_curve.png +[2025-09-06 01:14:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:14:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_44/total_acc_curve.png +[2025-09-06 01:14:24] [Rank 0] step:10001/10000 train_time:408271ms step_avg:40.82ms +[2025-09-06 01:14:24] [Rank 0] step:10001/10000 train_time:408271ms step_avg:40.82ms +[2025-09-06 01:14:24] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 01:14:24 2025 --- +[2025-09-06 01:14:24] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 01:14:24 2025 --- +[2025-09-06 01:14:24] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB +[2025-09-06 01:14:24] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..684348aeda12dfa41d525e1ef0e7e68ae096876b --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, 
+ "seed": 45, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.08, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "0de1dbf6-66e7-4729-a1a5-95c148f14f96", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 
894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 
425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 
617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 
898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..a19f644979be6d49aee6e1e7506b43bba1fa9675 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de51806cf523e680f3fc5eb39f67163ff2cc26bf8bc8a752ef862446abca1778 +size 302576 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..d0888faa88aac1f917d496a9d8d098a157e68b73 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3a2787cdf5d2907c60f0f8c5256298add5ed7129a9a00e321889d81acb33e91 +size 426301 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..f8969e4dde0d73133d45d1aa0d157b0e1878279e --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a3665f715abff440864351416d216a580b0193f6c047490a56e74bf724b0673 +size 89665 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..44bf4b232824cbfa8e9bdffd700f10bf93e6ce81 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04d6a14fe3884ce0a8904c8192f1ebccc46624037c7b2e61d7a0c7889f90b988 +size 123134 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/training_log_0de1dbf6-66e7-4729-a1a5-95c148f14f96.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/training_log_0de1dbf6-66e7-4729-a1a5-95c148f14f96.txt new file mode 100644 index 0000000000000000000000000000000000000000..bbe7fa04356fb4aba1d0ff9250994daec6edf568 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/training_log_0de1dbf6-66e7-4729-a1a5-95c148f14f96.txt @@ -0,0 +1,5614 @@ +[2025-09-06 01:14:44] [Rank 0] PRINT: --- Script Start: Sat Sep 6 01:14:44 2025 --- +[2025-09-06 01:14:44] [Rank 0] PRINT: --- Script Start: Sat Sep 6 01:14:44 2025 --- +[2025-09-06 01:14:44] 
[Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.08, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-06 01:14:44] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.08, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-06 01:14:44] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-06 01:14:44] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-06 01:14:44] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-06 01:14:44] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-06 01:14:44] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45 +[2025-09-06 01:14:44] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45 +[2025-09-06 01:14:44] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", 
buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
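# The _load_data_shard reader above fixes the shard layout: a 256-entry int32
# header (header[0] = 20240520 magic, header[1] = 1 version, header[2] = token
# count) followed by the tokens as uint16 (2 bytes each, enough for the 50257
# GPT-2 vocab). A minimal writer consistent with that reader -- a sketch only;
# write_data_shard is a hypothetical helper, not part of this script:
def write_data_shard(path, tokens):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520          # magic number the reader asserts on
    header[1] = 1                 # format version the reader asserts on
    header[2] = len(tokens)       # token count the reader trusts
    with open(path, "wb") as f:
        f.write(header.tobytes())                               # 256 * 4 bytes
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())  # 2 bytes/token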
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
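# Back-of-envelope check on the Hyperparameters above (illustrative, not part
# of the original script): val_seq_len = 4*4*1024 = 16384 and val_tokens =
# 491520, so the validation section below runs
#     val_tokens // (world_size * val_seq_len) = 30 // world_size
# steps: 30 steps on one GPU, 15 on two; with 4 or 8 GPUs, 30 is not evenly
# divisible, which is exactly the "not perfectly divisible" warning path.
# Training consumes world_size * train_seq_len = world_size * 3072 tokens/step.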
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup: one run directory per (mode, parameterization, lr, seed)
+logfile = None
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed (this run names the folder by sgd_lr)
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            # Append once per message. (The original repeated this write block,
+            # which is why every entry earlier in this log appears twice.)
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
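# Worked example of generate_powerlaw_selection_counts above (illustrative,
# m = 3 rather than this run's m = 15):
#   group 0: 1 class   x 2**3 = 8 samples/class
#   group 1: 1 class   x 2**2 = 4 samples/class
#   group 2: 2 classes x 2**1 = 2 samples/class
#   group 3: 4 classes x 2**0 = 1 sample/class
# so selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1} and
# class_groups = [0, 1, 2, 2, 3, 3, 3, 3]: the class count doubles per group
# while samples per class halve, so every group g >= 1 carries the same total
# mass (2**(m-1) samples) -- the power-law "tail" is in classes, not tokens.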
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
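# First-token accuracy (FTA) in miniature -- a sketch of the loop above, not
# executable as-is (tok stands for the GPT-2 tokenizer, logits for the model
# output on the padded sequence):
#
#   text = "Where was X born? Answer: Paris"
#   m = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', text, re.IGNORECASE)
#   prompt, answer = m.group(1).strip(), m.group(2).strip()
#   expected = tok.encode(' ' + answer, add_special_tokens=False)[0]
#   p_len = len(tok.encode(prompt, add_special_tokens=False))
#   pred = logits.squeeze(0)[p_len - 1, :].argmax().item()
#   hit = (pred == expected)
#
# The leading space in ' ' + answer matters: GPT-2's BPE folds the space into
# the first word, so ' Paris' and 'Paris' start with different token ids.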
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
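# Why two totals? A numeric illustration (made-up counts, not from this run):
# group A with 100 eval samples at 90% FTA, group B with 10 samples at 10%:
#   total_acc_weighted   = (90 + 1) / 110 ~= 0.83    # big groups dominate
#   total_acc_unweighted = (0.90 + 0.10) / 2 = 0.50  # every group equal
# The script reports both but uses the unweighted mean as 'total_acc', the
# fairer summary when head groups hold far more QA pairs than the tail.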
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
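# The stratified sampling above keeps every class represented: with sampling
# ratio r = num_samples / len(qa_data), each class contributes
# max(1, int(len(items) * r)) items. Illustrative (not from this run):
#   class with 2000 items, r = 0.1 -> 200 sampled
#   class with 3 items,    r = 0.1 -> max(1, 0) = 1 sampled
# so tail classes are never dropped outright, at the cost of slightly
# overshooting num_samples.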
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
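# Reading aid (not executed): the mode dispatch above as a table. QK/VO are
# the query-key / value-output attention matrices, W_1/W_2 the MLP in/out
# matrices. Note the argparse help only documents modes 0-8, and two log
# labels are misprinted in the script: mode 13 prints "Mode 32" and mode 16
# prints "Mode 15".
#
#   mode | Muon optimizes        | Adam optimizes (matrices)
#   -----+-----------------------+--------------------------
#    0   | all attn + all MLP    | -
#    1   | QK                    | VO + MLP
#    2   | VO                    | QK + MLP
#    3   | QKVO                  | MLP
#    4   | MLP                   | QKVO
#    5   | -                     | all attn + all MLP
#    6   | W_2                   | attn + W_1
#    7   | VO + MLP              | QK
#    8   | VO + W_2              | QK + W_1
#    9   | (pure SGD + momentum on every parameter)
#   10   | W_O + MLP             | W_V + QK
#   13   | W_O + W_2             | QK + W_V + W_1
#   14   | W_O                   | QK + W_V + MLP
#   15   | W_V                   | QK + W_O + MLP
#   16   | W_V + QK              | W_O + MLP
#
# Embeddings, lm_head and scalar parameters always go to Adam (except mode 9).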
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
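# Only real difference from the "qkvo" branch above (a descriptive note, not
# new behavior): the gated MLP has three weight matrices per block, so the
# groups are
#     mlp_w1_group     = c_fc + c_up     # both input-side projections
#     mlp_w2_group     = c_proj          # output projection
#     all_mlp_matrices = c_fc + c_proj + c_up
# which shifts what modes 6, 8 and 13 hand to Muon versus Adam relative to
# the two-matrix MLP of the qkvo parameterization.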
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
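# get_lr above with this run's settings (num_iterations = 10000,
# cooldown_frac = 0.8) -- a worked example of the multiplier applied to each
# group's initial_lr:
#   step  1000: x = 0.1 < 0.2, stable phase       -> 1.00
#   step  6000: x = 0.6, w = (1 - 0.6)/0.8 = 0.5  -> 0.5*1.0 + 0.5*0.1 = 0.55
#   step 10000: x = 1.0, w = 0.0                  -> 0.10
# i.e. flat for the first 20% of training, then linear decay to 10% of the
# initial LR. The attention window grows on the same clock: from 128 tokens
# at step 0 to next_multiple_of_n(1728, n=128) = 1792 tokens by the end.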
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
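# Distributed averaging pattern used above (sketch): each rank reduces its
# own validation losses to a mean, then one collective averages across ranks
# -- a mean of per-rank means, exact when all ranks ran the same number of
# val steps:
#     val_loss_avg = val_loss_sum / actual_val_steps       # per-rank mean
#     dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)  # mean over ranks
# The train-loss running average accumulated since the last validation point
# is reduced the same way before being reported.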
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-06 01:14:44] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed (named by the Adam LR, matching the saved config.json) + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the logfile exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n")
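+ +# Example: print0("PRINT: hello", console=True) echoes "hello" to stdout on the master +# process and appends "[YYYY-MM-DD HH:MM:SS] [Rank 0] PRINT: hello" to the logfile; +# on non-master ranks the call is a no-op.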
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on V, QK Attn + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
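+ # Modes 11 and 12 are intentionally unassigned; any unlisted mode raises the ValueError below. + # Modes 13-16 ablate single projections: 13 = Muon on W_O and W_2; 14 = Muon on W_O only; 15 = Muon on W_V only; 16 = Muon on Q, K, V.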
+ elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if regularization is desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on V, QK Attn + print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
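+ # Note: in the gated parameterization, mlp_w1_group holds both c_fc and c_up weights, so modes 6/8/13 keep the gated MLP's input-side projections (W_1 and the gate's up-projection) on the Adam side. + # Modes 11 and 12 are unassigned here as well; any unlisted mode raises the ValueError below.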
+ elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if regularization is desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; lr comes from the CLI args (no local muon_lr is set in this branch) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Gradient clipping for the SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Momentum warmup for optimizer2: 0.85 -> 0.95 over the first 300 steps
+        # (see the worked sketch after this script)
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Computed for reference; note it is not included in the printed line below
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
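For reference, the momentum warmup above ramps linearly from 0.85 to 0.95 over the first 300 steps and is then held constant. A minimal self-contained sketch of that schedule (momentum_at is a name introduced here for illustration, not part of the script):

import math

def momentum_at(step: int) -> float:
    # Mirrors the schedule applied to optimizer2's param groups above.
    frac = min(step / 300, 1)
    return (1 - frac) * 0.85 + frac * 0.95

assert momentum_at(0) == 0.85
assert math.isclose(momentum_at(150), 0.90)  # halfway: 0.5*0.85 + 0.5*0.95
assert momentum_at(300) == 0.95              # clamped from here on
assert momentum_at(10_000) == 0.95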
+[2025-09-06 01:14:45] [Rank 0] PRINT: Constructing model...
+[2025-09-06 01:14:46] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-06 01:14:46] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-06 01:14:46] [Rank 0] PRINT: Testing model forward function:
+[2025-09-06 01:14:50] [Rank 0] PRINT: Model test - Result type:
+[2025-09-06 01:14:50] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-06 01:14:50] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-06 01:14:50] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-06 01:14:50] [Rank 0] PRINT: Model returns:
+[2025-09-06 01:14:50] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-06 01:14:50] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-06 01:14:50] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.08).
+[2025-09-06 01:14:50] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-06 01:14:50] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-06 01:14:54] [Rank 0] PRINT: Model compilation complete.
+[2025-09-06 01:14:54] [Rank 0] PRINT: Starting warmup...
+[2025-09-06 01:15:32] [Rank 0] PRINT: Warmup complete.
+[2025-09-06 01:15:32] [Rank 0] PRINT: Starting training...
+[2025-09-06 01:15:39] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/fixed_eval_indices.json
+[2025-09-06 01:15:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:15:42] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-06 01:16:14] [Rank 0] step:21/10000 train_time:32044ms step_avg:1525.92ms
+[2025-09-06 01:16:15] [Rank 0] step:41/10000 train_time:32770ms step_avg:799.27ms
+[2025-09-06 01:16:16] [Rank 0] step:61/10000 train_time:33494ms step_avg:549.08ms
+[2025-09-06 01:16:16] [Rank 0] step:81/10000 train_time:34218ms step_avg:422.45ms
+[2025-09-06 01:16:17] [Rank 0] step:101/10000 train_time:34943ms step_avg:345.97ms
+[2025-09-06 01:16:18] [Rank 0] step:121/10000 train_time:35667ms step_avg:294.77ms
+[2025-09-06 01:16:19] [Rank 0] step:141/10000 train_time:36392ms step_avg:258.10ms
+[2025-09-06 01:16:19] [Rank 0] step:161/10000 train_time:37116ms step_avg:230.53ms
+[2025-09-06 01:16:20] [Rank 0] step:181/10000 train_time:37840ms step_avg:209.06ms
+[2025-09-06 01:16:21] [Rank 0] step:201/10000 train_time:38565ms step_avg:191.86ms
+[2025-09-06 01:16:22] [Rank 0] step:221/10000 train_time:39290ms step_avg:177.78ms
+[2025-09-06 01:16:22] [Rank 0] step:241/10000 train_time:40014ms step_avg:166.03ms
+[2025-09-06 01:16:23] [Rank 0] step:261/10000 train_time:40738ms step_avg:156.08ms
+[2025-09-06 01:16:24] [Rank 0] step:281/10000 train_time:41462ms step_avg:147.55ms
+[2025-09-06 01:16:24] [Rank 0] step:301/10000 train_time:42188ms step_avg:140.16ms
+[2025-09-06 01:16:25] [Rank 0] step:321/10000 train_time:42911ms step_avg:133.68ms
+[2025-09-06 01:16:26] [Rank 0] step:341/10000 train_time:43635ms step_avg:127.96ms
+[2025-09-06 01:16:27] [Rank 0] step:361/10000 train_time:44359ms step_avg:122.88ms
+[2025-09-06 01:16:27] [Rank 0] step:381/10000 train_time:45083ms step_avg:118.33ms
+[2025-09-06 01:16:28] [Rank 0] step:401/10000 train_time:45807ms step_avg:114.23ms
+[2025-09-06 01:16:29] [Rank 0] step:421/10000 train_time:46531ms step_avg:110.52ms
+[2025-09-06 01:16:29] [Rank 0] step:441/10000 train_time:47255ms step_avg:107.15ms
+[2025-09-06 01:16:30] [Rank 0] step:461/10000 train_time:47979ms step_avg:104.08ms
+[2025-09-06 01:16:31] [Rank 0] step:481/10000 train_time:48703ms step_avg:101.25ms
+[2025-09-06 01:16:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:16:32] [Rank 0] PRINT: step:500/10000 train_loss:5.9363 val_loss:4.3054 train_time:49507ms step_avg:99.01ms
+[2025-09-06 01:16:32] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:16:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:17:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:17:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:17:53] [Rank 0] Total Loss: 6.2599
+[2025-09-06 01:17:53] [Rank 0] Total FTA (Unweighted): 0.0819
+[2025-09-06 01:17:53] [Rank 0] Total FTA (Weighted): 0.0819
+[2025-09-06 01:17:53] [Rank 0] Group 0 Loss: 4.0286
+[2025-09-06 01:17:53] [Rank 0] Group 1 Loss: 4.1628
+[2025-09-06 01:17:53] [Rank 0] Group 2 Loss: 5.0593
+[2025-09-06 01:17:53] [Rank 0] Group 3 Loss: 5.7913
+[2025-09-06 01:17:53] [Rank 0] Group 4 Loss: 6.4807
+[2025-09-06 01:17:53] [Rank 0] Group 5 Loss: 6.6110
+[2025-09-06 01:17:53] [Rank 0] Group 6 Loss: 6.6790
+[2025-09-06 01:17:53] [Rank 0] Group 7 Loss: 6.6473
+[2025-09-06 01:17:53] [Rank 0] Group 8 Loss: 6.7771
+[2025-09-06 01:17:53] [Rank 0] Group 9 Loss: 6.8931
+[2025-09-06 01:17:53] [Rank 0] Group 10 Loss: 6.8786
+[2025-09-06 01:17:53] [Rank 0] Group 11 Loss: 6.9421
+[2025-09-06 01:17:53] [Rank 0] Group 12 Loss: 6.7669
+[2025-09-06 01:17:53] [Rank 0] Group 13 Loss: 6.7681
+[2025-09-06 01:17:53] [Rank 0] Group 14 Loss: 6.8902
+[2025-09-06 01:17:53] [Rank 0] Group 15 Loss: 6.7829
+[2025-09-06 01:17:53] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 01:17:53] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 01:17:53] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-06 01:17:53] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-06 01:17:53] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-06 01:17:53] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-06 01:17:53] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 01:17:53] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-06 01:17:53] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-06 01:17:53] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-06 01:17:53] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-06 01:17:53] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-06 01:17:53] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 01:17:53] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-06 01:17:53] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 01:17:53] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 01:17:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:17:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:17:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:17:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:17:55] [Rank 0] step:501/10000 train_time:49516ms step_avg:98.83ms
+[2025-09-06 01:17:56] [Rank 0] step:521/10000 train_time:50167ms step_avg:96.29ms
+[2025-09-06 01:17:57] [Rank 0] step:541/10000 train_time:50891ms step_avg:94.07ms
+[2025-09-06 01:17:57] [Rank 0] step:561/10000 train_time:51740ms step_avg:92.23ms
+[2025-09-06 01:17:58] [Rank 0] step:581/10000 train_time:52464ms step_avg:90.30ms
+[2025-09-06 01:17:59] [Rank 0] step:601/10000 train_time:53191ms step_avg:88.50ms
+[2025-09-06 01:18:00] [Rank 0] step:621/10000 train_time:53915ms step_avg:86.82ms
+[2025-09-06 01:18:00] [Rank 0] step:641/10000 train_time:54639ms step_avg:85.24ms
+[2025-09-06 01:18:01] [Rank 0] step:661/10000 train_time:55364ms step_avg:83.76ms
+[2025-09-06 01:18:02] [Rank 0] step:681/10000 train_time:56088ms step_avg:82.36ms
+[2025-09-06 01:18:03] [Rank 0] step:701/10000 train_time:56813ms step_avg:81.05ms
+[2025-09-06 01:18:03] [Rank 0] step:721/10000 train_time:57537ms step_avg:79.80ms
+[2025-09-06 01:18:04] [Rank 0] step:741/10000 train_time:58261ms step_avg:78.63ms
+[2025-09-06 01:18:05] [Rank 0] step:761/10000 train_time:58991ms step_avg:77.52ms
+[2025-09-06 01:18:05] [Rank 0] step:781/10000 train_time:59720ms step_avg:76.47ms
+[2025-09-06 01:18:06] [Rank 0] step:801/10000 train_time:60449ms step_avg:75.47ms
+[2025-09-06 01:18:08] [Rank 0] step:821/10000 train_time:61807ms step_avg:75.28ms
+[2025-09-06 01:18:08] [Rank 0] step:841/10000 train_time:62536ms step_avg:74.36ms
+[2025-09-06 01:18:09] [Rank 0] step:861/10000 train_time:63265ms step_avg:73.48ms
+[2025-09-06 01:18:10] [Rank 0] step:881/10000 train_time:63994ms step_avg:72.64ms
+[2025-09-06 01:18:10] [Rank 0] step:901/10000 train_time:64723ms step_avg:71.83ms
+[2025-09-06 01:18:11] [Rank 0] step:921/10000 train_time:65452ms step_avg:71.07ms
+[2025-09-06 01:18:12] [Rank 0] step:941/10000 train_time:66181ms step_avg:70.33ms
+[2025-09-06 01:18:13] [Rank 0] step:961/10000 train_time:66910ms step_avg:69.63ms
+[2025-09-06 01:18:13] [Rank 0] step:981/10000 train_time:67640ms step_avg:68.95ms
+[2025-09-06 01:18:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:18:15] [Rank 0] PRINT: step:1000/10000 train_loss:3.8793 val_loss:3.5508 train_time:68449ms step_avg:68.45ms
+[2025-09-06 01:18:15] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:18:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:19:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:19:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:19:35] [Rank 0] Total Loss: 5.7562
+[2025-09-06 01:19:35] [Rank 0] Total FTA (Unweighted): 0.1069
+[2025-09-06 01:19:35] [Rank 0] Total FTA (Weighted): 0.1069
+[2025-09-06 01:19:35] [Rank 0] Group 0 Loss: 3.5717
+[2025-09-06 01:19:35] [Rank 0] Group 1 Loss: 3.6406
+[2025-09-06 01:19:35] [Rank 0] Group 2 Loss: 4.0069
+[2025-09-06 01:19:35] [Rank 0] Group 3 Loss: 4.8370
+[2025-09-06 01:19:35] [Rank 0] Group 4 Loss: 5.7414
+[2025-09-06 01:19:35] [Rank 0] Group 5 Loss: 6.0344
+[2025-09-06 01:19:35] [Rank 0] Group 6 Loss: 6.2249
+[2025-09-06 01:19:35] [Rank 0] Group 7 Loss: 6.2281
+[2025-09-06 01:19:35] [Rank 0] Group 8 Loss: 6.3770
+[2025-09-06 01:19:35] [Rank 0] Group 9 Loss: 6.5440
+[2025-09-06 01:19:35] [Rank 0] Group 10 Loss: 6.5257
+[2025-09-06 01:19:35] [Rank 0] Group 11 Loss: 6.5986
+[2025-09-06 01:19:35] [Rank 0] Group 12 Loss: 6.4150
+[2025-09-06 01:19:35] [Rank 0] Group 13 Loss: 6.4252
+[2025-09-06 01:19:35] [Rank 0] Group 14 Loss: 6.5136
+[2025-09-06 01:19:35] [Rank 0] Group 15 Loss: 6.4154
+[2025-09-06 01:19:35] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-06 01:19:35] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 01:19:35] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 01:19:35] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 01:19:35] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 01:19:35] [Rank 0] Group 5 FTA: 0.1100
+[2025-09-06 01:19:35] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-06 01:19:35] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 01:19:35] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-06 01:19:35] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-06 01:19:35] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-06 01:19:35] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 01:19:35] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 01:19:35] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 01:19:35] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 01:19:35] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 01:19:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:19:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:19:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:19:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:19:37] [Rank 0] step:1001/10000 train_time:68458ms step_avg:68.39ms
+[2025-09-06 01:19:38] [Rank 0] step:1021/10000 train_time:69118ms step_avg:67.70ms
+[2025-09-06 01:19:38] [Rank 0] step:1041/10000 train_time:69849ms step_avg:67.10ms
+[2025-09-06 01:19:39] [Rank 0] step:1061/10000 train_time:70578ms step_avg:66.52ms
+[2025-09-06 01:19:40] [Rank 0] step:1081/10000 train_time:71307ms step_avg:65.96ms
+[2025-09-06 01:19:40] [Rank 0] step:1101/10000 train_time:72036ms step_avg:65.43ms
+[2025-09-06 01:19:41] [Rank 0] step:1121/10000 train_time:72765ms step_avg:64.91ms
+[2025-09-06 01:19:42] [Rank 0] step:1141/10000 train_time:73494ms step_avg:64.41ms
+[2025-09-06 01:19:43] [Rank 0] step:1161/10000 train_time:74223ms step_avg:63.93ms
+[2025-09-06 01:19:43] [Rank 0] step:1181/10000 train_time:74952ms step_avg:63.46ms
+[2025-09-06 01:19:44] [Rank 0] step:1201/10000 train_time:75680ms step_avg:63.01ms
+[2025-09-06 01:19:45] [Rank 0] step:1221/10000 train_time:76409ms step_avg:62.58ms
+[2025-09-06 01:19:46] [Rank 0] step:1241/10000 train_time:77138ms step_avg:62.16ms
+[2025-09-06 01:19:46] [Rank 0] step:1261/10000 train_time:77867ms step_avg:61.75ms
+[2025-09-06 01:19:47] [Rank 0] step:1281/10000 train_time:78596ms step_avg:61.35ms
+[2025-09-06 01:19:48] [Rank 0] step:1301/10000 train_time:79325ms step_avg:60.97ms
+[2025-09-06 01:19:49] [Rank 0] step:1321/10000 train_time:80054ms step_avg:60.60ms
+[2025-09-06 01:19:49] [Rank 0] step:1341/10000 train_time:80783ms step_avg:60.24ms
+[2025-09-06 01:19:50] [Rank 0] step:1361/10000 train_time:81512ms step_avg:59.89ms
+[2025-09-06 01:19:51] [Rank 0] step:1381/10000 train_time:82241ms step_avg:59.55ms
+[2025-09-06 01:19:51] [Rank 0] step:1401/10000 train_time:82970ms step_avg:59.22ms
+[2025-09-06 01:19:52] [Rank 0] step:1421/10000 train_time:83700ms step_avg:58.90ms
+[2025-09-06 01:19:53] [Rank 0] step:1441/10000 train_time:84429ms step_avg:58.59ms
+[2025-09-06 01:19:54] [Rank 0] step:1461/10000 train_time:85158ms step_avg:58.29ms
+[2025-09-06 01:19:54] [Rank 0] step:1481/10000 train_time:85887ms step_avg:57.99ms
+[2025-09-06 01:19:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:19:56] [Rank 0] PRINT: step:1500/10000 train_loss:3.3494 val_loss:3.1696 train_time:86696ms step_avg:57.80ms
+[2025-09-06 01:19:56] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:19:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:21:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:21:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:21:16] [Rank 0] Total Loss: 5.4816
+[2025-09-06 01:21:16] [Rank 0] Total FTA (Unweighted): 0.1269
+[2025-09-06 01:21:16] [Rank 0] Total FTA (Weighted): 0.1269
+[2025-09-06 01:21:16] [Rank 0] Group 0 Loss: 3.5032
+[2025-09-06 01:21:16] [Rank 0] Group 1 Loss: 3.5142
+[2025-09-06 01:21:16] [Rank 0] Group 2 Loss: 3.7038
+[2025-09-06 01:21:16] [Rank 0] Group 3 Loss: 4.3103
+[2025-09-06 01:21:16] [Rank 0] Group 4 Loss: 5.2334
+[2025-09-06 01:21:16] [Rank 0] Group 5 Loss: 5.6534
+[2025-09-06 01:21:16] [Rank 0] Group 6 Loss: 5.8891
+[2025-09-06 01:21:16] [Rank 0] Group 7 Loss: 5.9412
+[2025-09-06 01:21:16] [Rank 0] Group 8 Loss: 6.1414
+[2025-09-06 01:21:16] [Rank 0] Group 9 Loss: 6.2937
+[2025-09-06 01:21:16] [Rank 0] Group 10 Loss: 6.3278
+[2025-09-06 01:21:16] [Rank 0] Group 11 Loss: 6.3587
+[2025-09-06 01:21:16] [Rank 0] Group 12 Loss: 6.1578
+[2025-09-06 01:21:16] [Rank 0] Group 13 Loss: 6.1938
+[2025-09-06 01:21:16] [Rank 0] Group 14 Loss: 6.2846
+[2025-09-06 01:21:16] [Rank 0] Group 15 Loss: 6.1996
+[2025-09-06 01:21:16] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-06 01:21:16] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 01:21:16] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 01:21:16] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 01:21:16] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 01:21:16] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-06 01:21:16] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-06 01:21:16] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 01:21:16] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-06 01:21:16] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 01:21:16] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-06 01:21:16] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 01:21:16] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 01:21:16] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 01:21:16] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 01:21:16] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 01:21:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:21:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:21:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:21:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:21:17] [Rank 0] step:1501/10000 train_time:86706ms step_avg:57.77ms
+[2025-09-06 01:21:18] [Rank 0] step:1521/10000 train_time:87378ms step_avg:57.45ms
+[2025-09-06 01:21:19] [Rank 0] step:1541/10000 train_time:88106ms step_avg:57.17ms
+[2025-09-06 01:21:20] [Rank 0] step:1561/10000 train_time:88835ms step_avg:56.91ms
+[2025-09-06 01:21:20] [Rank 0] step:1581/10000 train_time:89564ms step_avg:56.65ms
+[2025-09-06 01:21:21] [Rank 0] step:1601/10000 train_time:90294ms step_avg:56.40ms
+[2025-09-06 01:21:22] [Rank 0] step:1621/10000 train_time:91023ms step_avg:56.15ms
+[2025-09-06 01:21:23] [Rank 0] step:1641/10000 train_time:92381ms step_avg:56.30ms
+[2025-09-06 01:21:24] [Rank 0] step:1661/10000 train_time:93110ms step_avg:56.06ms
+[2025-09-06 01:21:25] [Rank 0] step:1681/10000 train_time:93840ms step_avg:55.82ms
+[2025-09-06 01:21:25] [Rank 0] step:1701/10000 train_time:94569ms step_avg:55.60ms
+[2025-09-06 01:21:26] [Rank 0] step:1721/10000 train_time:95299ms step_avg:55.37ms
+[2025-09-06 01:21:27] [Rank 0] step:1741/10000 train_time:96028ms step_avg:55.16ms
+[2025-09-06 01:21:28] [Rank 0] step:1761/10000 train_time:96757ms step_avg:54.94ms
+[2025-09-06 01:21:28] [Rank 0] step:1781/10000 train_time:97486ms step_avg:54.74ms
+[2025-09-06 01:21:29] [Rank 0] step:1801/10000 train_time:98215ms step_avg:54.53ms
+[2025-09-06 01:21:30] [Rank 0] step:1821/10000 train_time:98944ms step_avg:54.34ms
+[2025-09-06 01:21:30] [Rank 0] step:1841/10000 train_time:99673ms step_avg:54.14ms
+[2025-09-06 01:21:31] [Rank 0] step:1861/10000 train_time:100402ms step_avg:53.95ms
+[2025-09-06 01:21:32] [Rank 0] step:1881/10000 train_time:101131ms step_avg:53.76ms
+[2025-09-06 01:21:33] [Rank 0] step:1901/10000 train_time:101860ms step_avg:53.58ms
+[2025-09-06 01:21:33] [Rank 0] step:1921/10000 train_time:102589ms step_avg:53.40ms
+[2025-09-06 01:21:34] [Rank 0] step:1941/10000 train_time:103318ms step_avg:53.23ms
+[2025-09-06 01:21:35] [Rank 0] step:1961/10000 train_time:104048ms step_avg:53.06ms
+[2025-09-06 01:21:36] [Rank 0] step:1981/10000 train_time:104777ms step_avg:52.89ms
+[2025-09-06 01:21:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:21:37] [Rank 0] PRINT: step:2000/10000 train_loss:3.0560 val_loss:2.9409 train_time:105586ms step_avg:52.79ms
+[2025-09-06 01:21:37] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:21:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:22:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:22:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:22:58] [Rank 0] Total Loss: 5.3296
+[2025-09-06 01:22:58] [Rank 0] Total FTA (Unweighted): 0.1619
+[2025-09-06 01:22:58] [Rank 0] Total FTA (Weighted): 0.1619
+[2025-09-06 01:22:58] [Rank 0] Group 0 Loss: 3.4938
+[2025-09-06 01:22:58] [Rank 0] Group 1 Loss: 3.4459
+[2025-09-06 01:22:58] [Rank 0] Group 2 Loss: 3.5807
+[2025-09-06 01:22:58] [Rank 0] Group 3 Loss: 4.1177
+[2025-09-06 01:22:58] [Rank 0] Group 4 Loss: 4.9525
+[2025-09-06 01:22:58] [Rank 0] Group 5 Loss: 5.4185
+[2025-09-06 01:22:58] [Rank 0] Group 6 Loss: 5.6800
+[2025-09-06 01:22:58] [Rank 0] Group 7 Loss: 5.7664
+[2025-09-06 01:22:58] [Rank 0] Group 8 Loss: 5.9965
+[2025-09-06 01:22:58] [Rank 0] Group 9 Loss: 6.1234
+[2025-09-06 01:22:58] [Rank 0] Group 10 Loss: 6.1615
+[2025-09-06 01:22:58] [Rank 0] Group 11 Loss: 6.2251
+[2025-09-06 01:22:58] [Rank 0] Group 12 Loss: 6.0346
+[2025-09-06 01:22:58] [Rank 0] Group 13 Loss: 6.0648
+[2025-09-06 01:22:58] [Rank 0] Group 14 Loss: 6.1437
+[2025-09-06 01:22:58] [Rank 0] Group 15 Loss: 6.0685
+[2025-09-06 01:22:58] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 01:22:58] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 01:22:58] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 01:22:58] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 01:22:58] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 01:22:58] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 01:22:58] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-06 01:22:58] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 01:22:58] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-06 01:22:58] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 01:22:58] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-06 01:22:58] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 01:22:58] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 01:22:58] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 01:22:58] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 01:22:58] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:22:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:22:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:22:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:22:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:22:59] [Rank 0] step:2001/10000 train_time:105596ms step_avg:52.77ms
+[2025-09-06 01:23:00] [Rank 0] step:2021/10000 train_time:106270ms step_avg:52.58ms
+[2025-09-06 01:23:01] [Rank 0] step:2041/10000 train_time:106999ms step_avg:52.42ms
+[2025-09-06 01:23:02] [Rank 0] step:2061/10000 train_time:107729ms step_avg:52.27ms
+[2025-09-06 01:23:02] [Rank 0] step:2081/10000 train_time:108457ms step_avg:52.12ms
+[2025-09-06 01:23:03] [Rank 0] step:2101/10000 train_time:109187ms step_avg:51.97ms
+[2025-09-06 01:23:04] [Rank 0] step:2121/10000 train_time:109916ms step_avg:51.82ms
+[2025-09-06 01:23:04] [Rank 0] step:2141/10000 train_time:110646ms step_avg:51.68ms
+[2025-09-06 01:23:05] [Rank 0] step:2161/10000 train_time:111375ms step_avg:51.54ms
+[2025-09-06 01:23:06] [Rank 0] step:2181/10000 train_time:112105ms step_avg:51.40ms
+[2025-09-06 01:23:07] [Rank 0] step:2201/10000 train_time:112835ms step_avg:51.27ms
+[2025-09-06 01:23:07] [Rank 0] step:2221/10000 train_time:113563ms step_avg:51.13ms
+[2025-09-06 01:23:08] [Rank 0] step:2241/10000 train_time:114297ms step_avg:51.00ms
+[2025-09-06 01:23:09] [Rank 0] step:2261/10000 train_time:115032ms step_avg:50.88ms
+[2025-09-06 01:23:10] [Rank 0] step:2281/10000 train_time:115768ms step_avg:50.75ms
+[2025-09-06 01:23:10] [Rank 0] step:2301/10000 train_time:116503ms step_avg:50.63ms
+[2025-09-06 01:23:11] [Rank 0] step:2321/10000 train_time:117350ms step_avg:50.56ms
+[2025-09-06 01:23:12] [Rank 0] step:2341/10000 train_time:118086ms step_avg:50.44ms
+[2025-09-06 01:23:13] [Rank 0] step:2361/10000 train_time:118821ms step_avg:50.33ms
+[2025-09-06 01:23:14] [Rank 0] step:2381/10000 train_time:119699ms step_avg:50.27ms
+[2025-09-06 01:23:14] [Rank 0] step:2401/10000 train_time:120434ms step_avg:50.16ms
+[2025-09-06 01:23:15] [Rank 0] step:2421/10000 train_time:121170ms step_avg:50.05ms
+[2025-09-06 01:23:16] [Rank 0] step:2441/10000 train_time:121906ms step_avg:49.94ms
+[2025-09-06 01:23:16] [Rank 0] step:2461/10000 train_time:122642ms step_avg:49.83ms
+[2025-09-06 01:23:17] [Rank 0] step:2481/10000 train_time:123377ms step_avg:49.73ms
+[2025-09-06 01:23:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:23:18] [Rank 0] PRINT: step:2500/10000 train_loss:2.8626 val_loss:2.7695 train_time:124194ms step_avg:49.68ms
+[2025-09-06 01:23:18] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:23:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:24:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:24:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:24:39] [Rank 0] Total Loss: 5.1301
+[2025-09-06 01:24:39] [Rank 0] Total FTA (Unweighted): 0.1713
+[2025-09-06 01:24:39] [Rank 0] Total FTA (Weighted): 0.1713
+[2025-09-06 01:24:39] [Rank 0] Group 0 Loss: 3.4008
+[2025-09-06 01:24:39] [Rank 0] Group 1 Loss: 3.3446
+[2025-09-06 01:24:39] [Rank 0] Group 2 Loss: 3.4606
+[2025-09-06 01:24:39] [Rank 0] Group 3 Loss: 3.9518
+[2025-09-06 01:24:39] [Rank 0] Group 4 Loss: 4.6568
+[2025-09-06 01:24:39] [Rank 0] Group 5 Loss: 5.1358
+[2025-09-06 01:24:39] [Rank 0] Group 6 Loss: 5.4318
+[2025-09-06 01:24:39] [Rank 0] Group 7 Loss: 5.5121
+[2025-09-06 01:24:39] [Rank 0] Group 8 Loss: 5.7758
+[2025-09-06 01:24:39] [Rank 0] Group 9 Loss: 5.9272
+[2025-09-06 01:24:39] [Rank 0] Group 10 Loss: 5.9446
+[2025-09-06 01:24:39] [Rank 0] Group 11 Loss: 6.0227
+[2025-09-06 01:24:39] [Rank 0] Group 12 Loss: 5.8382
+[2025-09-06 01:24:39] [Rank 0] Group 13 Loss: 5.8766
+[2025-09-06 01:24:39] [Rank 0] Group 14 Loss: 5.9346
+[2025-09-06 01:24:39] [Rank 0] Group 15 Loss: 5.8669
+[2025-09-06 01:24:39] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-06 01:24:39] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-06 01:24:39] [Rank 0] Group 2 FTA: 0.1800
[Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 01:24:39] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 01:24:39] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 01:24:39] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 01:24:39] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 01:24:39] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 01:24:39] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 01:24:39] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 01:24:39] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 01:24:39] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 01:24:39] [Rank 0] Group 8 FTA: 0.1800 +[2025-09-06 01:24:39] [Rank 0] Group 8 FTA: 0.1800 +[2025-09-06 01:24:39] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 01:24:39] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 01:24:39] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 01:24:39] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-06 01:24:39] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 01:24:39] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 01:24:39] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:24:39] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:24:39] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 01:24:39] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 01:24:40] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 01:24:40] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-06 01:24:40] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 01:24:40] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 01:24:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png +[2025-09-06 01:24:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png +[2025-09-06 01:24:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png +[2025-09-06 01:24:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png +[2025-09-06 01:24:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png +[2025-09-06 01:24:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png +[2025-09-06 01:24:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png +[2025-09-06 01:24:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png +[2025-09-06 01:24:41] [Rank 0] step:2501/10000 train_time:124203ms step_avg:49.66ms +[2025-09-06 01:24:41] [Rank 0] step:2501/10000 train_time:124203ms step_avg:49.66ms +[2025-09-06 01:24:42] [Rank 0] step:2521/10000 train_time:124876ms step_avg:49.53ms +[2025-09-06 01:24:42] [Rank 0] step:2521/10000 train_time:124876ms step_avg:49.53ms +[2025-09-06 01:24:42] [Rank 0] step:2541/10000 train_time:125611ms step_avg:49.43ms +[2025-09-06 01:24:42] [Rank 0] step:2541/10000 train_time:125611ms step_avg:49.43ms +[2025-09-06 01:24:43] [Rank 0] step:2561/10000 train_time:126346ms step_avg:49.33ms +[2025-09-06 01:24:43] [Rank 0] step:2561/10000 train_time:126346ms step_avg:49.33ms +[2025-09-06 01:24:44] [Rank 0] step:2581/10000 train_time:127082ms step_avg:49.24ms +[2025-09-06 01:24:44] [Rank 0] step:2581/10000 train_time:127082ms 
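The divisibility warning repeated before every validation pass is simple arithmetic: 491520 / 65536 = 7.5, so only 7 full validation batches (458752 tokens) fit and the trailing 32768 tokens are never scored. A minimal sketch of the kind of guard that would emit this message, assuming the loader walks val_tokens in fixed chunks of val_batch_size (the variable names come from the log text; the actual check in the training script may differ):

    val_tokens = 491520
    val_batch_size = 65536

    # 491520 = 7 * 65536 + 32768, hence the warning above
    num_full_batches, remainder = divmod(val_tokens, val_batch_size)  # (7, 32768)
    if remainder != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")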
+[2025-09-06 01:24:45] [Rank 0] step:2601/10000 train_time:127817ms step_avg:49.14ms
+[2025-09-06 01:24:45] [Rank 0] step:2621/10000 train_time:128552ms step_avg:49.05ms
+[2025-09-06 01:24:46] [Rank 0] step:2641/10000 train_time:129288ms step_avg:48.95ms
+[2025-09-06 01:24:47] [Rank 0] step:2661/10000 train_time:130024ms step_avg:48.86ms
+[2025-09-06 01:24:48] [Rank 0] step:2681/10000 train_time:130759ms step_avg:48.77ms
+[2025-09-06 01:24:48] [Rank 0] step:2701/10000 train_time:131494ms step_avg:48.68ms
+[2025-09-06 01:24:49] [Rank 0] step:2721/10000 train_time:132230ms step_avg:48.60ms
+[2025-09-06 01:24:50] [Rank 0] step:2741/10000 train_time:132966ms step_avg:48.51ms
+[2025-09-06 01:24:50] [Rank 0] step:2761/10000 train_time:133701ms step_avg:48.42ms
+[2025-09-06 01:24:51] [Rank 0] step:2781/10000 train_time:134436ms step_avg:48.34ms
+[2025-09-06 01:24:52] [Rank 0] step:2801/10000 train_time:135172ms step_avg:48.26ms
+[2025-09-06 01:24:53] [Rank 0] step:2821/10000 train_time:136518ms step_avg:48.39ms
+[2025-09-06 01:24:54] [Rank 0] step:2841/10000 train_time:137252ms step_avg:48.31ms
+[2025-09-06 01:24:55] [Rank 0] step:2861/10000 train_time:137988ms step_avg:48.23ms
+[2025-09-06 01:24:55] [Rank 0] step:2881/10000 train_time:138723ms step_avg:48.15ms
+[2025-09-06 01:24:56] [Rank 0] step:2901/10000 train_time:139459ms step_avg:48.07ms
+[2025-09-06 01:24:57] [Rank 0] step:2921/10000 train_time:140195ms step_avg:48.00ms
+[2025-09-06 01:24:58] [Rank 0] step:2941/10000 train_time:140931ms step_avg:47.92ms
+[2025-09-06 01:24:58] [Rank 0] step:2961/10000 train_time:141666ms step_avg:47.84ms
+[2025-09-06 01:24:59] [Rank 0] step:2981/10000 train_time:142402ms step_avg:47.77ms
+[2025-09-06 01:25:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:25:00] [Rank 0] PRINT: step:3000/10000 train_loss:2.7117 val_loss:2.6438 train_time:143218ms step_avg:47.74ms
+[2025-09-06 01:25:00] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:25:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:26:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:26:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:26:21] [Rank 0] Total Loss: 5.0613
+[2025-09-06 01:26:21] [Rank 0] Total FTA (Unweighted): 0.1956
+[2025-09-06 01:26:21] [Rank 0] Total FTA (Weighted): 0.1956
+[2025-09-06 01:26:21] [Rank 0] Group 0 Loss: 3.4499
+[2025-09-06 01:26:21] [Rank 0] Group 1 Loss: 3.3096
+[2025-09-06 01:26:21] [Rank 0] Group 2 Loss: 3.4670
+[2025-09-06 01:26:21] [Rank 0] Group 3 Loss: 3.9052
+[2025-09-06 01:26:21] [Rank 0] Group 4 Loss: 4.5367
+[2025-09-06 01:26:21] [Rank 0] Group 5 Loss: 5.0147
+[2025-09-06 01:26:21] [Rank 0] Group 6 Loss: 5.3066
+[2025-09-06 01:26:21] [Rank 0] Group 7 Loss: 5.4169
+[2025-09-06 01:26:21] [Rank 0] Group 8 Loss: 5.6878
+[2025-09-06 01:26:21] [Rank 0] Group 9 Loss: 5.8446
+[2025-09-06 01:26:21] [Rank 0] Group 10 Loss: 5.8520
+[2025-09-06 01:26:21] [Rank 0] Group 11 Loss: 5.9319
+[2025-09-06 01:26:21] [Rank 0] Group 12 Loss: 5.7836
+[2025-09-06 01:26:21] [Rank 0] Group 13 Loss: 5.8106
+[2025-09-06 01:26:21] [Rank 0] Group 14 Loss: 5.8691
+[2025-09-06 01:26:21] [Rank 0] Group 15 Loss: 5.7948
+[2025-09-06 01:26:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:26:21] [Rank 0] Group 1 FTA: 0.3800
+[2025-09-06 01:26:21] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 01:26:21] [Rank 0] Group 3 FTA: 0.1600
+[2025-09-06 01:26:21] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 01:26:21] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 01:26:21] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 01:26:21] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 01:26:21] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 01:26:21] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 01:26:21] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 01:26:21] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 01:26:21] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 01:26:21] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 01:26:21] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 01:26:21] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 01:26:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:26:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:26:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:26:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:26:23] [Rank 0] step:3001/10000 train_time:143227ms step_avg:47.73ms
+[2025-09-06 01:26:24] [Rank 0] step:3021/10000 train_time:144053ms step_avg:47.68ms
+[2025-09-06 01:26:24] [Rank 0] step:3041/10000 train_time:144788ms step_avg:47.61ms
+[2025-09-06 01:26:25] [Rank 0] step:3061/10000 train_time:145524ms step_avg:47.54ms
+[2025-09-06 01:26:26] [Rank 0] step:3081/10000 train_time:146259ms step_avg:47.47ms
+[2025-09-06 01:26:27] [Rank 0] step:3101/10000 train_time:146995ms step_avg:47.40ms
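The step_avg field is the cumulative train_time divided by the step index, not a recent-window average; e.g. at step 3001 above, 143227 ms / 3001 steps ≈ 47.73 ms, exactly the value logged. A hypothetical one-liner reproducing it (not taken from the training script):

    def step_avg_ms(step: int, train_time_ms: int) -> float:
        # Cumulative average wall time per training step, as printed in the log.
        return train_time_ms / step

    assert round(step_avg_ms(3001, 143227), 2) == 47.73  # matches the line above

This also explains why step_avg keeps drifting down: the slower early steps are amortized over an ever larger denominator.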
+[2025-09-06 01:26:27] [Rank 0] step:3121/10000 train_time:147731ms step_avg:47.33ms
+[2025-09-06 01:26:28] [Rank 0] step:3141/10000 train_time:148467ms step_avg:47.27ms
+[2025-09-06 01:26:29] [Rank 0] step:3161/10000 train_time:149202ms step_avg:47.20ms
+[2025-09-06 01:26:30] [Rank 0] step:3181/10000 train_time:149937ms step_avg:47.14ms
+[2025-09-06 01:26:30] [Rank 0] step:3201/10000 train_time:150673ms step_avg:47.07ms
+[2025-09-06 01:26:31] [Rank 0] step:3221/10000 train_time:151407ms step_avg:47.01ms
+[2025-09-06 01:26:32] [Rank 0] step:3241/10000 train_time:152142ms step_avg:46.94ms
+[2025-09-06 01:26:33] [Rank 0] step:3261/10000 train_time:152876ms step_avg:46.88ms
+[2025-09-06 01:26:33] [Rank 0] step:3281/10000 train_time:153612ms step_avg:46.82ms
+[2025-09-06 01:26:34] [Rank 0] step:3301/10000 train_time:154348ms step_avg:46.76ms
+[2025-09-06 01:26:35] [Rank 0] step:3321/10000 train_time:155083ms step_avg:46.70ms
+[2025-09-06 01:26:36] [Rank 0] step:3341/10000 train_time:155818ms step_avg:46.64ms
+[2025-09-06 01:26:36] [Rank 0] step:3361/10000 train_time:156554ms step_avg:46.58ms
+[2025-09-06 01:26:37] [Rank 0] step:3381/10000 train_time:157290ms step_avg:46.52ms
+[2025-09-06 01:26:38] [Rank 0] step:3401/10000 train_time:158026ms step_avg:46.46ms
+[2025-09-06 01:26:38] [Rank 0] step:3421/10000 train_time:158761ms step_avg:46.41ms
+[2025-09-06 01:26:39] [Rank 0] step:3441/10000 train_time:159496ms step_avg:46.35ms
+[2025-09-06 01:26:40] [Rank 0] step:3461/10000 train_time:160232ms step_avg:46.30ms
+[2025-09-06 01:26:41] [Rank 0] step:3481/10000 train_time:160968ms step_avg:46.24ms
+[2025-09-06 01:26:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:26:42] [Rank 0] PRINT: step:3500/10000 train_loss:2.6028 val_loss:2.5472 train_time:161784ms step_avg:46.22ms
+[2025-09-06 01:26:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:26:42] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:28:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:28:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:28:02] [Rank 0] Total Loss: 4.9938
+[2025-09-06 01:28:02] [Rank 0] Total FTA (Unweighted): 0.2125
+[2025-09-06 01:28:02] [Rank 0] Total FTA (Weighted): 0.2125
+[2025-09-06 01:28:02] [Rank 0] Group 0 Loss: 3.4936
+[2025-09-06 01:28:02] [Rank 0] Group 1 Loss: 3.3708
+[2025-09-06 01:28:02] [Rank 0] Group 2 Loss: 3.4505
+[2025-09-06 01:28:03] [Rank 0] Group 3 Loss: 3.8844
+[2025-09-06 01:28:03] [Rank 0] Group 4 Loss: 4.4221
+[2025-09-06 01:28:03] [Rank 0] Group 5 Loss: 4.9010
+[2025-09-06 01:28:03] [Rank 0] Group 6 Loss: 5.1934
+[2025-09-06 01:28:03] [Rank 0] Group 7 Loss: 5.3468
+[2025-09-06 01:28:03] [Rank 0] Group 8 Loss: 5.6013
+[2025-09-06 01:28:03] [Rank 0] Group 9 Loss: 5.7267
+[2025-09-06 01:28:03] [Rank 0] Group 10 Loss: 5.7712
+[2025-09-06 01:28:03] [Rank 0] Group 11 Loss: 5.8362
+[2025-09-06 01:28:03] [Rank 0] Group 12 Loss: 5.6939
+[2025-09-06 01:28:03] [Rank 0] Group 13 Loss: 5.7309
+[2025-09-06 01:28:03] [Rank 0] Group 14 Loss: 5.7660
+[2025-09-06 01:28:03] [Rank 0] Group 15 Loss: 5.7117
+[2025-09-06 01:28:03] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:28:03] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 01:28:03] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 01:28:03] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:28:03] [Rank 0] Group 4 FTA: 0.1100
+[2025-09-06 01:28:03] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 01:28:03] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 01:28:03] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-06 01:28:03] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 01:28:03] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 01:28:03] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 01:28:03] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 01:28:03] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 01:28:03] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 01:28:03] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 01:28:03] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:28:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:28:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:28:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:28:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:28:04] [Rank 0] step:3501/10000 train_time:161793ms step_avg:46.21ms
+[2025-09-06 01:28:05] [Rank 0] step:3521/10000 train_time:162468ms step_avg:46.14ms
+[2025-09-06 01:28:06] [Rank 0] step:3541/10000 train_time:163204ms step_avg:46.09ms
+[2025-09-06 01:28:06] [Rank 0] step:3561/10000 train_time:163939ms step_avg:46.04ms
+[2025-09-06 01:28:07] [Rank 0] step:3581/10000 train_time:164675ms step_avg:45.99ms
+[2025-09-06 01:28:08] [Rank 0] step:3601/10000 train_time:165411ms step_avg:45.93ms
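In each detailed-evaluation block, Total Loss and Total FTA (Unweighted) are the plain means of the 16 group values, and the weighted totals coincide with the unweighted ones, consistent with the 1600 fixed-eval samples being split evenly across the 16 groups (100 each; the even split is an assumption, the arithmetic is checkable). Verifying against the step-3500 block above:

    # Group FTAs copied from the step-3500 detailed evaluation above.
    group_fta = [1.00, 0.52, 0.18, 0.17, 0.11, 0.20, 0.13, 0.11,
                 0.19, 0.12, 0.12, 0.11, 0.09, 0.14, 0.12, 0.09]

    unweighted = sum(group_fta) / len(group_fta)
    weights = [100] * 16  # assumed even per-group sample counts
    weighted = sum(w * f for w, f in zip(weights, group_fta)) / sum(weights)
    assert round(unweighted, 4) == round(weighted, 4) == 0.2125  # logged total

The same check works for Total Loss: the 16 group losses above average to 4.9938.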
+[2025-09-06 01:28:09] [Rank 0] step:3621/10000 train_time:166146ms step_avg:45.88ms
+[2025-09-06 01:28:10] [Rank 0] step:3641/10000 train_time:167495ms step_avg:46.00ms
+[2025-09-06 01:28:11] [Rank 0] step:3661/10000 train_time:168230ms step_avg:45.95ms
+[2025-09-06 01:28:11] [Rank 0] step:3681/10000 train_time:168965ms step_avg:45.90ms
+[2025-09-06 01:28:12] [Rank 0] step:3701/10000 train_time:169701ms step_avg:45.85ms
+[2025-09-06 01:28:13] [Rank 0] step:3721/10000 train_time:170436ms step_avg:45.80ms
+[2025-09-06 01:28:14] [Rank 0] step:3741/10000 train_time:171172ms step_avg:45.76ms
+[2025-09-06 01:28:14] [Rank 0] step:3761/10000 train_time:171906ms step_avg:45.71ms
+[2025-09-06 01:28:15] [Rank 0] step:3781/10000 train_time:172641ms step_avg:45.66ms
+[2025-09-06 01:28:16] [Rank 0] step:3801/10000 train_time:173376ms step_avg:45.61ms
+[2025-09-06 01:28:16] [Rank 0] step:3821/10000 train_time:174117ms step_avg:45.57ms
+[2025-09-06 01:28:17] [Rank 0] step:3841/10000 train_time:174852ms step_avg:45.52ms
+[2025-09-06 01:28:18] [Rank 0] step:3861/10000 train_time:175587ms step_avg:45.48ms
+[2025-09-06 01:28:19] [Rank 0] step:3881/10000 train_time:176322ms step_avg:45.43ms
+[2025-09-06 01:28:19] [Rank 0] step:3901/10000 train_time:177057ms step_avg:45.39ms
+[2025-09-06 01:28:20] [Rank 0] step:3921/10000 train_time:177793ms step_avg:45.34ms
+[2025-09-06 01:28:21] [Rank 0] step:3941/10000 train_time:178528ms step_avg:45.30ms
+[2025-09-06 01:28:22] [Rank 0] step:3961/10000 train_time:179264ms step_avg:45.26ms
+[2025-09-06 01:28:22] [Rank 0] step:3981/10000 train_time:179999ms step_avg:45.21ms
+[2025-09-06 01:28:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:28:23] [Rank 0] PRINT: step:4000/10000 train_loss:2.5201 val_loss:2.4766 train_time:180816ms step_avg:45.20ms
+[2025-09-06 01:28:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:28:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:29:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:29:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:29:44] [Rank 0] Total Loss: 4.9702
+[2025-09-06 01:29:44] [Rank 0] Total FTA (Unweighted): 0.2206
+[2025-09-06 01:29:44] [Rank 0] Total FTA (Weighted): 0.2206
+[2025-09-06 01:29:44] [Rank 0] Group 0 Loss: 3.5000
+[2025-09-06 01:29:44] [Rank 0] Group 1 Loss: 3.4125
+[2025-09-06 01:29:44] [Rank 0] Group 2 Loss: 3.4304
+[2025-09-06 01:29:44] [Rank 0] Group 3 Loss: 3.8677
+[2025-09-06 01:29:44] [Rank 0] Group 4 Loss: 4.3773
+[2025-09-06 01:29:44] [Rank 0] Group 5 Loss: 4.8560
+[2025-09-06 01:29:44] [Rank 0] Group 6 Loss: 5.1590
+[2025-09-06 01:29:44] [Rank 0] Group 7 Loss: 5.3249
+[2025-09-06 01:29:44] [Rank 0] Group 8 Loss: 5.5681
+[2025-09-06 01:29:44] [Rank 0] Group 9 Loss: 5.6940
+[2025-09-06 01:29:44] [Rank 0] Group 10 Loss: 5.7531
+[2025-09-06 01:29:44] [Rank 0] Group 11 Loss: 5.7911
+[2025-09-06 01:29:44] [Rank 0] Group 12 Loss: 5.6633
+[2025-09-06 01:29:44] [Rank 0] Group 13 Loss: 5.7024
+[2025-09-06 01:29:44] [Rank 0] Group 14 Loss: 5.7442
+[2025-09-06 01:29:44] [Rank 0] Group 15 Loss: 5.6799
+[2025-09-06 01:29:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:29:44] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 01:29:44] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 01:29:44] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:29:44] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-06 01:29:44] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-06 01:29:44] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 01:29:44] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 01:29:44] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 01:29:44] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 01:29:44] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 01:29:44] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 01:29:44] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 01:29:44] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 01:29:44] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 01:29:44] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:29:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:29:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:29:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:29:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:29:46] [Rank 0] step:4001/10000 train_time:180825ms step_avg:45.19ms
+[2025-09-06 01:29:47] [Rank 0] step:4021/10000 train_time:182123ms step_avg:45.29ms
+[2025-09-06 01:29:48] [Rank 0] step:4041/10000 train_time:182859ms step_avg:45.25ms
+[2025-09-06 01:29:49] [Rank 0] step:4061/10000 train_time:183595ms step_avg:45.21ms
+[2025-09-06 01:29:49] [Rank 0] step:4081/10000 train_time:184331ms step_avg:45.17ms
+[2025-09-06 01:29:50] [Rank 0] step:4101/10000 train_time:185066ms step_avg:45.13ms
+[2025-09-06 01:29:51] [Rank 0] step:4121/10000 train_time:185801ms step_avg:45.09ms
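Because the log is plain append-only text, the validation curve can be recovered directly from the PRINT lines without the saved PNGs. A hypothetical parser written against the exact format above (the regex and file name are illustrative, not part of the training code):

    import re

    # Matches e.g. "PRINT: step:4000/10000 train_loss:2.5201 val_loss:2.4766"
    VAL_RE = re.compile(r"PRINT: step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

    def parse_val_curve(log_text: str) -> list[tuple[int, float, float]]:
        # Returns (step, train_loss, val_loss) triples in log order.
        return [(int(s), float(tr), float(va))
                for s, tr, va in VAL_RE.findall(log_text)]

    # parse_val_curve(open("training_log.txt").read())
    # -> [(2500, 2.8626, 2.7695), (3000, 2.7117, 2.6438), (3500, 2.6028, 2.5472), ...]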
+[2025-09-06 01:29:52] [Rank 0] step:4141/10000 train_time:186537ms step_avg:45.05ms
+[2025-09-06 01:29:52] [Rank 0] step:4161/10000 train_time:187272ms step_avg:45.01ms
+[2025-09-06 01:29:53] [Rank 0] step:4181/10000 train_time:188008ms step_avg:44.97ms
+[2025-09-06 01:29:54] [Rank 0] step:4201/10000 train_time:188743ms step_avg:44.93ms
+[2025-09-06 01:29:55] [Rank 0] step:4221/10000 train_time:189478ms step_avg:44.89ms
+[2025-09-06 01:29:55] [Rank 0] step:4241/10000 train_time:190214ms step_avg:44.85ms
+[2025-09-06 01:29:56] [Rank 0] step:4261/10000 train_time:190950ms step_avg:44.81ms
+[2025-09-06 01:29:57] [Rank 0] step:4281/10000 train_time:191686ms step_avg:44.78ms
+[2025-09-06 01:29:57] [Rank 0] step:4301/10000 train_time:192421ms step_avg:44.74ms
+[2025-09-06 01:29:58] [Rank 0] step:4321/10000 train_time:193156ms step_avg:44.70ms
+[2025-09-06 01:29:59] [Rank 0] step:4341/10000 train_time:193892ms step_avg:44.67ms
+[2025-09-06 01:30:00] [Rank 0] step:4361/10000 train_time:194627ms step_avg:44.63ms
+[2025-09-06 01:30:00] [Rank 0] step:4381/10000 train_time:195362ms step_avg:44.59ms
+[2025-09-06 01:30:01] [Rank 0] step:4401/10000 train_time:196098ms step_avg:44.56ms
+[2025-09-06 01:30:02] [Rank 0] step:4421/10000 train_time:196835ms step_avg:44.52ms
+[2025-09-06 01:30:03] [Rank 0] step:4441/10000 train_time:197590ms step_avg:44.49ms
+[2025-09-06 01:30:03] [Rank 0] step:4461/10000 train_time:198326ms step_avg:44.46ms
+[2025-09-06 01:30:04] [Rank 0] step:4481/10000 train_time:199061ms step_avg:44.42ms
+[2025-09-06 01:30:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:30:05] [Rank 0] PRINT: step:4500/10000 train_loss:2.4536 val_loss:2.4138 train_time:199877ms step_avg:44.42ms
+[2025-09-06 01:30:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:30:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:31:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:31:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:31:26] [Rank 0] Total Loss: 4.9033
+[2025-09-06 01:31:26] [Rank 0] Total FTA (Unweighted): 0.2625
+[2025-09-06 01:31:26] [Rank 0] Total FTA (Weighted): 0.2625
+[2025-09-06 01:31:26] [Rank 0] Group 0 Loss: 3.4364
+[2025-09-06 01:31:26] [Rank 0] Group 1 Loss: 3.3718
+[2025-09-06 01:31:26] [Rank 0] Group 2 Loss: 3.4002
+[2025-09-06 01:31:26] [Rank 0] Group 3 Loss: 3.8293
+[2025-09-06 01:31:26] [Rank 0] Group 4 Loss: 4.2849
+[2025-09-06 01:31:26] [Rank 0] Group 5 Loss: 4.7918
+[2025-09-06 01:31:26] [Rank 0] Group 6 Loss: 5.0709
+[2025-09-06 01:31:26] [Rank 0] Group 7 Loss: 5.2180
+[2025-09-06 01:31:26] [Rank 0] Group 8 Loss: 5.5195
+[2025-09-06 01:31:26] [Rank 0] Group 9 Loss: 5.6418
+[2025-09-06 01:31:26] [Rank 0] Group 10 Loss: 5.6637
+[2025-09-06 01:31:26] [Rank 0] Group 11 Loss: 5.6960
+[2025-09-06 01:31:26] [Rank 0] Group 12 Loss: 5.6199
+[2025-09-06 01:31:26] [Rank 0] Group 13 Loss: 5.6272
+[2025-09-06 01:31:26] [Rank 0] Group 14 Loss: 5.6733
+[2025-09-06 01:31:26] [Rank 0] Group 15 Loss: 5.6088
+[2025-09-06 01:31:26] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:31:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:31:26] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:31:26] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:31:26] [Rank 0] Group 4 FTA: 0.1700
+[2025-09-06 01:31:26] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-06 01:31:26] [Rank 0] Group 6 FTA: 0.1700
+[2025-09-06 01:31:26] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-06 01:31:26] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 01:31:26] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 01:31:26] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-06 01:31:26] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-06 01:31:26] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-06 01:31:26] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 01:31:26] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 01:31:26] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:31:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:31:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:31:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:31:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:31:27] [Rank 0] step:4501/10000 train_time:199887ms step_avg:44.41ms
+[2025-09-06 01:31:28] [Rank 0] step:4521/10000 train_time:200557ms step_avg:44.36ms
+[2025-09-06 01:31:29] [Rank 0] step:4541/10000 train_time:201291ms step_avg:44.33ms
+[2025-09-06 01:31:30] [Rank 0] step:4561/10000 train_time:202026ms step_avg:44.29ms
+[2025-09-06 01:31:30] [Rank 0] step:4581/10000 train_time:202761ms step_avg:44.26ms
+[2025-09-06 01:31:31] [Rank 0] step:4601/10000 train_time:203496ms step_avg:44.23ms
+[2025-09-06 01:31:32] [Rank 0] step:4621/10000 train_time:204231ms step_avg:44.20ms
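Note that train_time barely advances across an evaluation (199877 ms at step 4500 vs 199887 ms at step 4501), so the clock appears to pause during detailed evals, and step_avg remains a cumulative figure. The marginal training rate is already well below it: between steps 4501 and 4621 above, (204231 - 199887) / (4621 - 4501) ≈ 36.2 ms per step versus a reported step_avg of about 44 ms. A two-point estimate (illustrative helper, not from the script):

    def marginal_step_ms(step_a: int, time_a_ms: int, step_b: int, time_b_ms: int) -> float:
        # Per-step wall time between two log points on the same training clock.
        return (time_b_ms - time_a_ms) / (step_b - step_a)

    print(marginal_step_ms(4501, 199887, 4621, 204231))  # ~36.2 ms/step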
+[2025-09-06 01:31:32] [Rank 0] step:4641/10000 train_time:204965ms step_avg:44.16ms
+[2025-09-06 01:31:33] [Rank 0] step:4661/10000 train_time:205700ms step_avg:44.13ms
+[2025-09-06 01:31:34] [Rank 0] step:4681/10000 train_time:206435ms step_avg:44.10ms
+[2025-09-06 01:31:35] [Rank 0] step:4701/10000 train_time:207170ms step_avg:44.07ms
+[2025-09-06 01:31:35] [Rank 0] step:4721/10000 train_time:207905ms step_avg:44.04ms
+[2025-09-06 01:31:36] [Rank 0] step:4741/10000 train_time:208640ms step_avg:44.01ms
+[2025-09-06 01:31:37] [Rank 0] step:4761/10000 train_time:209375ms step_avg:43.98ms
+[2025-09-06 01:31:38] [Rank 0] step:4781/10000 train_time:210223ms step_avg:43.97ms
+[2025-09-06 01:31:38] [Rank 0] step:4801/10000 train_time:210958ms step_avg:43.94ms
+[2025-09-06 01:31:39] [Rank 0] step:4821/10000 train_time:211693ms step_avg:43.91ms
+[2025-09-06 01:31:40] [Rank 0] step:4841/10000 train_time:212743ms step_avg:43.95ms
+[2025-09-06 01:31:41] [Rank 0] step:4861/10000 train_time:213478ms step_avg:43.92ms
+[2025-09-06 01:31:42] [Rank 0] step:4881/10000 train_time:214213ms step_avg:43.89ms
+[2025-09-06 01:31:42] [Rank 0] step:4901/10000 train_time:214948ms step_avg:43.86ms
+[2025-09-06 01:31:43] [Rank 0] step:4921/10000 train_time:215683ms step_avg:43.83ms
+[2025-09-06 01:31:44] [Rank 0] step:4941/10000 train_time:216418ms step_avg:43.80ms
+[2025-09-06 01:31:45] [Rank 0] step:4961/10000 train_time:217153ms step_avg:43.77ms
+[2025-09-06 01:31:45] [Rank 0] step:4981/10000 train_time:217888ms step_avg:43.74ms
+[2025-09-06 01:31:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:31:47] [Rank 0] PRINT: step:5000/10000 train_loss:2.3975 val_loss:2.3678 train_time:218808ms step_avg:43.76ms
+[2025-09-06 01:31:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:31:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:33:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:33:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:33:08] [Rank 0] Total Loss: 4.8595
+[2025-09-06 01:33:08] [Rank 0] Total FTA (Unweighted): 0.2756
+[2025-09-06 01:33:08] [Rank 0] Total FTA (Weighted): 0.2756
+[2025-09-06 01:33:08] [Rank 0] Group 0 Loss: 3.4401
+[2025-09-06 01:33:08] [Rank 0] Group 1 Loss: 3.3055
+[2025-09-06 01:33:08] [Rank 0] Group 2 Loss: 3.3893
+[2025-09-06 01:33:08] [Rank 0] Group 3 Loss: 3.8337
+[2025-09-06 01:33:08] [Rank 0] Group 4 Loss: 4.2521
+[2025-09-06 01:33:08] [Rank 0] Group 5 Loss: 4.7166
+[2025-09-06 01:33:08] [Rank 0] Group 6 Loss: 5.0025
+[2025-09-06 01:33:08] [Rank 0] Group 7 Loss: 5.1720
+[2025-09-06 01:33:08] [Rank 0] Group 8 Loss: 5.4521
+[2025-09-06 01:33:08] [Rank 0] Group 9 Loss: 5.5672
+[2025-09-06 01:33:08] [Rank 0] Group 10 Loss: 5.6095
+[2025-09-06 01:33:08] [Rank 0] Group 11 Loss: 5.6597
+[2025-09-06 01:33:08] [Rank 0] Group 12 Loss: 5.5629
+[2025-09-06 01:33:08] [Rank 0] Group 13 Loss: 5.6001
+[2025-09-06 01:33:08] [Rank 0] Group 14 Loss: 5.6223
+[2025-09-06 01:33:08] [Rank 0] Group 15 Loss: 5.5658
+[2025-09-06 01:33:08] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:33:08] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:33:08] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:33:08] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:33:08] [Rank 0] Group 4 FTA: 0.2100
+[2025-09-06 01:33:08] [Rank 0] Group 5 FTA: 0.2200
+[2025-09-06 01:33:08] [Rank 0] Group 6 FTA: 0.2600
+[2025-09-06 01:33:08] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:33:08] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 01:33:08] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 01:33:08] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-06 01:33:08] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-06 01:33:08] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 01:33:08] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 01:33:08] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 01:33:08] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:33:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:33:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:33:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:33:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:33:09] [Rank 0] step:5001/10000 train_time:218817ms step_avg:43.75ms
+[2025-09-06 01:33:10] [Rank 0] step:5021/10000 train_time:219485ms step_avg:43.71ms
+[2025-09-06 01:33:11] [Rank 0] step:5041/10000 train_time:220220ms step_avg:43.69ms
+[2025-09-06 01:33:11] [Rank 0] step:5061/10000 train_time:220955ms step_avg:43.66ms
+[2025-09-06 01:33:12] [Rank 0] step:5081/10000 train_time:221690ms step_avg:43.63ms
+[2025-09-06 01:33:13] [Rank 0] step:5101/10000 train_time:222426ms step_avg:43.60ms
+[2025-09-06 01:33:14] [Rank 0] step:5121/10000 train_time:223161ms step_avg:43.58ms
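The "[✓] ... curve updated and saved" lines show the run overwriting its four PNGs after every detailed evaluation rather than writing new files. A minimal matplotlib sketch of that pattern under the same assumptions (an in-memory history appended per eval; all names are illustrative, only the output filename is taken from the log):

    import matplotlib
    matplotlib.use("Agg")  # headless rendering on a training node
    import matplotlib.pyplot as plt

    history: dict = {"steps": [], "group_loss": []}

    def update_per_class_loss_plot(step, group_losses, out_png="per_class_loss_curves.png"):
        # Append this eval's 16 per-group losses, then overwrite the PNG.
        history["steps"].append(step)
        history["group_loss"].append(group_losses)
        fig, ax = plt.subplots()
        for g in range(len(group_losses)):
            ax.plot(history["steps"], [row[g] for row in history["group_loss"]],
                    label=f"Group {g}")
        ax.set_xlabel("step")
        ax.set_ylabel("loss")
        ax.legend(fontsize=5, ncol=2)
        fig.savefig(out_png, dpi=150)
        plt.close(fig)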
01:33:14] [Rank 0] step:5121/10000 train_time:223161ms step_avg:43.58ms +[2025-09-06 01:33:14] [Rank 0] step:5141/10000 train_time:223897ms step_avg:43.55ms +[2025-09-06 01:33:14] [Rank 0] step:5141/10000 train_time:223897ms step_avg:43.55ms +[2025-09-06 01:33:15] [Rank 0] step:5161/10000 train_time:224632ms step_avg:43.52ms +[2025-09-06 01:33:15] [Rank 0] step:5161/10000 train_time:224632ms step_avg:43.52ms +[2025-09-06 01:33:16] [Rank 0] step:5181/10000 train_time:225368ms step_avg:43.50ms +[2025-09-06 01:33:16] [Rank 0] step:5181/10000 train_time:225368ms step_avg:43.50ms +[2025-09-06 01:33:17] [Rank 0] step:5201/10000 train_time:226104ms step_avg:43.47ms +[2025-09-06 01:33:17] [Rank 0] step:5201/10000 train_time:226104ms step_avg:43.47ms +[2025-09-06 01:33:17] [Rank 0] step:5221/10000 train_time:226840ms step_avg:43.45ms +[2025-09-06 01:33:17] [Rank 0] step:5221/10000 train_time:226840ms step_avg:43.45ms +[2025-09-06 01:33:18] [Rank 0] step:5241/10000 train_time:227576ms step_avg:43.42ms +[2025-09-06 01:33:18] [Rank 0] step:5241/10000 train_time:227576ms step_avg:43.42ms +[2025-09-06 01:33:19] [Rank 0] step:5261/10000 train_time:228311ms step_avg:43.40ms +[2025-09-06 01:33:19] [Rank 0] step:5261/10000 train_time:228311ms step_avg:43.40ms +[2025-09-06 01:33:20] [Rank 0] step:5281/10000 train_time:229048ms step_avg:43.37ms +[2025-09-06 01:33:20] [Rank 0] step:5281/10000 train_time:229048ms step_avg:43.37ms +[2025-09-06 01:33:20] [Rank 0] step:5301/10000 train_time:229783ms step_avg:43.35ms +[2025-09-06 01:33:20] [Rank 0] step:5301/10000 train_time:229783ms step_avg:43.35ms +[2025-09-06 01:33:21] [Rank 0] step:5321/10000 train_time:230518ms step_avg:43.32ms +[2025-09-06 01:33:21] [Rank 0] step:5321/10000 train_time:230518ms step_avg:43.32ms +[2025-09-06 01:33:22] [Rank 0] step:5341/10000 train_time:231254ms step_avg:43.30ms +[2025-09-06 01:33:22] [Rank 0] step:5341/10000 train_time:231254ms step_avg:43.30ms +[2025-09-06 01:33:23] [Rank 0] step:5361/10000 train_time:231989ms step_avg:43.27ms +[2025-09-06 01:33:23] [Rank 0] step:5361/10000 train_time:231989ms step_avg:43.27ms +[2025-09-06 01:33:23] [Rank 0] step:5381/10000 train_time:232724ms step_avg:43.25ms +[2025-09-06 01:33:23] [Rank 0] step:5381/10000 train_time:232724ms step_avg:43.25ms +[2025-09-06 01:33:24] [Rank 0] step:5401/10000 train_time:233460ms step_avg:43.23ms +[2025-09-06 01:33:24] [Rank 0] step:5401/10000 train_time:233460ms step_avg:43.23ms +[2025-09-06 01:33:25] [Rank 0] step:5421/10000 train_time:234195ms step_avg:43.20ms +[2025-09-06 01:33:25] [Rank 0] step:5421/10000 train_time:234195ms step_avg:43.20ms +[2025-09-06 01:33:25] [Rank 0] step:5441/10000 train_time:234931ms step_avg:43.18ms +[2025-09-06 01:33:25] [Rank 0] step:5441/10000 train_time:234931ms step_avg:43.18ms +[2025-09-06 01:33:26] [Rank 0] step:5461/10000 train_time:235666ms step_avg:43.15ms +[2025-09-06 01:33:26] [Rank 0] step:5461/10000 train_time:235666ms step_avg:43.15ms +[2025-09-06 01:33:27] [Rank 0] step:5481/10000 train_time:236402ms step_avg:43.13ms +[2025-09-06 01:33:27] [Rank 0] step:5481/10000 train_time:236402ms step_avg:43.13ms +[2025-09-06 01:33:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:33:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 01:33:28] [Rank 0] PRINT: step:5500/10000 train_loss:2.3539 val_loss:2.3263 train_time:237218ms step_avg:43.13ms
+[2025-09-06 01:33:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:33:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:34:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:34:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:34:49] [Rank 0] Total Loss: 4.8340
+[2025-09-06 01:34:49] [Rank 0] Total FTA (Unweighted): 0.2737
+[2025-09-06 01:34:49] [Rank 0] Total FTA (Weighted): 0.2737
+[2025-09-06 01:34:49] [Rank 0] Group 0 Loss: 3.4391
+[2025-09-06 01:34:49] [Rank 0] Group 1 Loss: 3.3812
+[2025-09-06 01:34:49] [Rank 0] Group 2 Loss: 3.3792
+[2025-09-06 01:34:49] [Rank 0] Group 3 Loss: 3.8359
+[2025-09-06 01:34:49] [Rank 0] Group 4 Loss: 4.2040
+[2025-09-06 01:34:49] [Rank 0] Group 5 Loss: 4.6769
+[2025-09-06 01:34:49] [Rank 0] Group 6 Loss: 4.9512
+[2025-09-06 01:34:49] [Rank 0] Group 7 Loss: 5.1287
+[2025-09-06 01:34:49] [Rank 0] Group 8 Loss: 5.4170
+[2025-09-06 01:34:49] [Rank 0] Group 9 Loss: 5.5175
+[2025-09-06 01:34:49] [Rank 0] Group 10 Loss: 5.6011
+[2025-09-06 01:34:49] [Rank 0] Group 11 Loss: 5.6287
+[2025-09-06 01:34:49] [Rank 0] Group 12 Loss: 5.5192
+[2025-09-06 01:34:49] [Rank 0] Group 13 Loss: 5.5481
+[2025-09-06 01:34:49] [Rank 0] Group 14 Loss: 5.5828
+[2025-09-06 01:34:49] [Rank 0] Group 15 Loss: 5.5341
+[2025-09-06 01:34:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:34:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:34:49] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:34:49] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:34:49] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:34:49] [Rank 0] Group 5 FTA: 0.2200
+[2025-09-06 01:34:49] [Rank 0] Group 6 FTA: 0.2000
+[2025-09-06 01:34:49] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 01:34:49] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 01:34:49] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 01:34:49] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 01:34:49] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-06 01:34:49] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 01:34:49] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 01:34:49] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 01:34:49] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 01:34:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:34:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:34:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:34:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:34:50] [Rank 0] step:5501/10000 train_time:237228ms step_avg:43.12ms
+[2025-09-06 01:34:51] [Rank 0] step:5521/10000 train_time:237898ms step_avg:43.09ms
+[2025-09-06 01:34:52] [Rank 0] step:5541/10000 train_time:238633ms step_avg:43.07ms
+[2025-09-06 01:34:52] [Rank 0] step:5561/10000 train_time:239368ms step_avg:43.04ms
+[2025-09-06 01:34:53] [Rank 0] step:5581/10000 train_time:240104ms step_avg:43.02ms
+[2025-09-06 01:34:54] [Rank 0] step:5601/10000 train_time:240839ms step_avg:43.00ms
+[2025-09-06 01:34:55] [Rank 0] step:5621/10000 train_time:241575ms step_avg:42.98ms
+[2025-09-06 01:34:56] [Rank 0] step:5641/10000 train_time:242917ms step_avg:43.06ms
+[2025-09-06 01:34:57] [Rank 0] step:5661/10000 train_time:243654ms step_avg:43.04ms
+[2025-09-06 01:34:57] [Rank 0] step:5681/10000 train_time:244389ms step_avg:43.02ms
+[2025-09-06 01:34:58] [Rank 0] step:5701/10000 train_time:245125ms step_avg:43.00ms
+[2025-09-06 01:34:59] [Rank 0] step:5721/10000 train_time:245860ms step_avg:42.97ms
+[2025-09-06 01:35:00] [Rank 0] step:5741/10000 train_time:246595ms step_avg:42.95ms
+[2025-09-06 01:35:00] [Rank 0] step:5761/10000 train_time:247331ms step_avg:42.93ms
+[2025-09-06 01:35:01] [Rank 0] step:5781/10000 train_time:248067ms step_avg:42.91ms
+[2025-09-06 01:35:02] [Rank 0] step:5801/10000 train_time:248803ms step_avg:42.89ms
+[2025-09-06 01:35:03] [Rank 0] step:5821/10000 train_time:249539ms step_avg:42.87ms
+[2025-09-06 01:35:03] [Rank 0] step:5841/10000 train_time:250275ms step_avg:42.85ms
+[2025-09-06 01:35:04] [Rank 0] step:5861/10000 train_time:251010ms step_avg:42.83ms
+[2025-09-06 01:35:05] [Rank 0] step:5881/10000 train_time:251745ms step_avg:42.81ms
+[2025-09-06 01:35:05] [Rank 0] step:5901/10000 train_time:252481ms step_avg:42.79ms
+[2025-09-06 01:35:06] [Rank 0] step:5921/10000 train_time:253217ms step_avg:42.77ms
+[2025-09-06 01:35:07] [Rank 0] step:5941/10000 train_time:253952ms step_avg:42.75ms
+[2025-09-06 01:35:08] [Rank 0] step:5961/10000 train_time:254687ms step_avg:42.73ms
+[2025-09-06 01:35:08] [Rank 0] step:5981/10000 train_time:255422ms step_avg:42.71ms
+[2025-09-06 01:35:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:35:10] [Rank 0] PRINT: step:6000/10000 train_loss:2.3187 val_loss:2.2948 train_time:256239ms step_avg:42.71ms
+[2025-09-06 01:35:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:35:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:36:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:36:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:36:30] [Rank 0] Total Loss: 4.8450
+[2025-09-06 01:36:30] [Rank 0] Total FTA (Unweighted): 0.2856
+[2025-09-06 01:36:30] [Rank 0] Total FTA (Weighted): 0.2856
+[2025-09-06 01:36:30] [Rank 0] Group 0 Loss: 3.4577
+[2025-09-06 01:36:30] [Rank 0] Group 1 Loss: 3.3986
+[2025-09-06 01:36:30] [Rank 0] Group 2 Loss: 3.4325
+[2025-09-06 01:36:30] [Rank 0] Group 3 Loss: 3.8177
+[2025-09-06 01:36:30] [Rank 0] Group 4 Loss: 4.2314
+[2025-09-06 01:36:30] [Rank 0] Group 5 Loss: 4.6786
+[2025-09-06 01:36:30] [Rank 0] Group 6 Loss: 4.9724
+[2025-09-06 01:36:30] [Rank 0] Group 7 Loss: 5.1295
+[2025-09-06 01:36:30] [Rank 0] Group 8 Loss: 5.4230
+[2025-09-06 01:36:30] [Rank 0] Group 9 Loss: 5.5326
+[2025-09-06 01:36:30] [Rank 0] Group 10 Loss: 5.5993
+[2025-09-06 01:36:30] [Rank 0] Group 11 Loss: 5.6277
+[2025-09-06 01:36:30] [Rank 0] Group 12 Loss: 5.5327
+[2025-09-06 01:36:30] [Rank 0] Group 13 Loss: 5.5507
+[2025-09-06 01:36:30] [Rank 0] Group 14 Loss: 5.5983
+[2025-09-06 01:36:30] [Rank 0] Group 15 Loss: 5.5381
+[2025-09-06 01:36:30] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:36:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:36:30] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:36:30] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:36:30] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:36:30] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:36:30] [Rank 0] Group 6 FTA: 0.2500
+[2025-09-06 01:36:30] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:36:30] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:36:30] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:36:30] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 01:36:30] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-06 01:36:30] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-06 01:36:30] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-06 01:36:30] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 01:36:30] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:36:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:36:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:36:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:36:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:36:32] [Rank 0] step:6001/10000 train_time:256248ms step_avg:42.70ms
+[2025-09-06 01:36:33] [Rank 0] step:6021/10000 train_time:257523ms step_avg:42.77ms
+[2025-09-06 01:36:34] [Rank 0] step:6041/10000 train_time:258258ms step_avg:42.75ms
+[2025-09-06 01:36:34] [Rank 0] step:6061/10000 train_time:258993ms step_avg:42.73ms
+[2025-09-06 01:36:35] [Rank 0] step:6081/10000 train_time:259729ms step_avg:42.71ms
+[2025-09-06 01:36:36] [Rank 0] step:6101/10000 train_time:260464ms step_avg:42.69ms
+[2025-09-06 01:36:37] [Rank 0] step:6121/10000 train_time:261200ms step_avg:42.67ms
+[2025-09-06 01:36:37] [Rank 0] step:6141/10000 train_time:261934ms step_avg:42.65ms
+[2025-09-06 01:36:38] [Rank 0] step:6161/10000 train_time:262670ms step_avg:42.63ms
+[2025-09-06 01:36:39] [Rank 0] step:6181/10000 train_time:263405ms step_avg:42.62ms
+[2025-09-06 01:36:40] [Rank 0] step:6201/10000 train_time:264141ms step_avg:42.60ms
+[2025-09-06 01:36:40] [Rank 0] step:6221/10000 train_time:264876ms step_avg:42.58ms
+[2025-09-06 01:36:41] [Rank 0] step:6241/10000 train_time:265611ms step_avg:42.56ms
+[2025-09-06 01:36:42] [Rank 0] step:6261/10000 train_time:266349ms step_avg:42.54ms
+[2025-09-06 01:36:42] [Rank 0] step:6281/10000 train_time:267084ms step_avg:42.52ms
+[2025-09-06 01:36:43] [Rank 0] step:6301/10000 train_time:267820ms step_avg:42.50ms
+[2025-09-06 01:36:44] [Rank 0] step:6321/10000 train_time:268555ms step_avg:42.49ms
+[2025-09-06 01:36:45] [Rank 0] step:6341/10000 train_time:269290ms step_avg:42.47ms
+[2025-09-06 01:36:45] [Rank 0] step:6361/10000 train_time:270026ms step_avg:42.45ms
+[2025-09-06 01:36:46] [Rank 0] step:6381/10000 train_time:270761ms step_avg:42.43ms
+[2025-09-06 01:36:47] [Rank 0] step:6401/10000 train_time:271496ms step_avg:42.41ms
+[2025-09-06 01:36:48] [Rank 0] step:6421/10000 train_time:272232ms step_avg:42.40ms
+[2025-09-06 01:36:48] [Rank 0] step:6441/10000 train_time:272968ms step_avg:42.38ms
+[2025-09-06 01:36:49] [Rank 0] step:6461/10000 train_time:273702ms step_avg:42.36ms
+[2025-09-06 01:36:50] [Rank 0] step:6481/10000 train_time:274438ms step_avg:42.34ms
+[2025-09-06 01:36:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:36:51] [Rank 0] PRINT: step:6500/10000 train_loss:2.2934 val_loss:2.2722 train_time:275254ms step_avg:42.35ms
+[2025-09-06 01:36:51] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:36:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:38:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:38:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:38:12] [Rank 0] Total Loss: 4.8282
+[2025-09-06 01:38:12] [Rank 0] Total FTA (Unweighted): 0.2944
+[2025-09-06 01:38:12] [Rank 0] Total FTA (Weighted): 0.2944
+[2025-09-06 01:38:12] [Rank 0] Group 0 Loss: 3.4824
+[2025-09-06 01:38:12] [Rank 0] Group 1 Loss: 3.4071
+[2025-09-06 01:38:12] [Rank 0] Group 2 Loss: 3.4435
+[2025-09-06 01:38:12] [Rank 0] Group 3 Loss: 3.8334
+[2025-09-06 01:38:12] [Rank 0] Group 4 Loss: 4.2158
+[2025-09-06 01:38:12] [Rank 0] Group 5 Loss: 4.6485
+[2025-09-06 01:38:12] [Rank 0] Group 6 Loss: 4.9457
+[2025-09-06 01:38:12] [Rank 0] Group 7 Loss: 5.0971
+[2025-09-06 01:38:12] [Rank 0] Group 8 Loss: 5.3933
+[2025-09-06 01:38:12] [Rank 0] Group 9 Loss: 5.4879
+[2025-09-06 01:38:12] [Rank 0] Group 10 Loss: 5.5683
+[2025-09-06 01:38:12] [Rank 0] Group 11 Loss: 5.6088
+[2025-09-06 01:38:12] [Rank 0] Group 12 Loss: 5.5048
+[2025-09-06 01:38:12] [Rank 0] Group 13 Loss: 5.5349
+[2025-09-06 01:38:12] [Rank 0] Group 14 Loss: 5.5593
+[2025-09-06 01:38:12] [Rank 0] Group 15 Loss: 5.5209
+[2025-09-06 01:38:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:38:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:38:12] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:38:12] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:38:12] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:38:12] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:38:12] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-06 01:38:12] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 01:38:12] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:38:12] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:38:12] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-06 01:38:12] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 01:38:12] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-06 01:38:12] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-06 01:38:12] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 01:38:12] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 01:38:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:38:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:38:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:38:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:38:14] [Rank 0] step:6501/10000 train_time:275263ms step_avg:42.34ms
+[2025-09-06 01:38:14] [Rank 0] step:6521/10000 train_time:275935ms step_avg:42.31ms
+[2025-09-06 01:38:15] [Rank 0] step:6541/10000 train_time:276671ms step_avg:42.30ms
+[2025-09-06 01:38:16] [Rank 0] step:6561/10000 train_time:277406ms step_avg:42.28ms
+[2025-09-06 01:38:16] [Rank 0] step:6581/10000 train_time:278142ms step_avg:42.26ms
+[2025-09-06 01:38:17] [Rank 0] step:6601/10000 train_time:278877ms step_avg:42.25ms
+[2025-09-06 01:38:18] [Rank 0] step:6621/10000 train_time:279612ms step_avg:42.23ms
+[2025-09-06 01:38:19] [Rank 0] step:6641/10000 train_time:280347ms step_avg:42.21ms
+[2025-09-06 01:38:19] [Rank 0] step:6661/10000 train_time:281083ms step_avg:42.20ms
+[2025-09-06 01:38:20] [Rank 0] step:6681/10000 train_time:281819ms step_avg:42.18ms
+[2025-09-06 01:38:21] [Rank 0] step:6701/10000 train_time:282554ms step_avg:42.17ms
+[2025-09-06 01:38:22] [Rank 0] step:6721/10000 train_time:283289ms step_avg:42.15ms
+[2025-09-06 01:38:22] [Rank 0] step:6741/10000 train_time:284024ms step_avg:42.13ms
+[2025-09-06 01:38:23] [Rank 0] step:6761/10000 train_time:284760ms step_avg:42.12ms
+[2025-09-06 01:38:24] [Rank 0] step:6781/10000 train_time:285495ms step_avg:42.10ms
+[2025-09-06 01:38:25] [Rank 0] step:6801/10000 train_time:286230ms step_avg:42.09ms
+[2025-09-06 01:38:25] [Rank 0] step:6821/10000 train_time:286966ms step_avg:42.07ms
+[2025-09-06 01:38:26] [Rank 0] step:6841/10000 train_time:287897ms step_avg:42.08ms
+[2025-09-06 01:38:27] [Rank 0] step:6861/10000 train_time:288633ms step_avg:42.07ms
+[2025-09-06 01:38:28] [Rank 0] step:6881/10000 train_time:289368ms step_avg:42.05ms
+[2025-09-06 01:38:28] [Rank 0] step:6901/10000 train_time:290104ms step_avg:42.04ms
+[2025-09-06 01:38:29] [Rank 0] step:6921/10000 train_time:290840ms step_avg:42.02ms
+[2025-09-06 01:38:30] [Rank 0] step:6941/10000 train_time:291575ms step_avg:42.01ms
+[2025-09-06 01:38:31] [Rank 0] step:6961/10000 train_time:292312ms step_avg:41.99ms
+[2025-09-06 01:38:31] [Rank 0] step:6981/10000 train_time:293047ms step_avg:41.98ms
+[2025-09-06 01:38:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
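Across these step lines, step_avg tracks cumulative train_time divided by the step index, e.g. 293047 ms / 6981 ≈ 41.98 ms for the last step entry above. A one-line sketch of that (assumed) relation:

    train_time_ms, step = 293047, 6981               # taken from the step:6981 line above
    print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:41.98ms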
+[2025-09-06 01:38:33] [Rank 0] PRINT: step:7000/10000 train_loss:2.2686 val_loss:2.2486 train_time:293862ms step_avg:41.98ms
+[2025-09-06 01:38:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:38:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:39:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:39:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:39:53] [Rank 0] Total Loss: 4.7999
+[2025-09-06 01:39:53] [Rank 0] Total FTA (Unweighted): 0.2906
+[2025-09-06 01:39:53] [Rank 0] Total FTA (Weighted): 0.2906
+[2025-09-06 01:39:53] [Rank 0] Group 0 Loss: 3.4740
+[2025-09-06 01:39:53] [Rank 0] Group 1 Loss: 3.3315
+[2025-09-06 01:39:53] [Rank 0] Group 2 Loss: 3.3957
+[2025-09-06 01:39:53] [Rank 0] Group 3 Loss: 3.8764
+[2025-09-06 01:39:53] [Rank 0] Group 4 Loss: 4.1970
+[2025-09-06 01:39:53] [Rank 0] Group 5 Loss: 4.6296
+[2025-09-06 01:39:53] [Rank 0] Group 6 Loss: 4.9189
+[2025-09-06 01:39:53] [Rank 0] Group 7 Loss: 5.0602
+[2025-09-06 01:39:53] [Rank 0] Group 8 Loss: 5.3656
+[2025-09-06 01:39:53] [Rank 0] Group 9 Loss: 5.4781
+[2025-09-06 01:39:53] [Rank 0] Group 10 Loss: 5.5231
+[2025-09-06 01:39:53] [Rank 0] Group 11 Loss: 5.5724
+[2025-09-06 01:39:53] [Rank 0] Group 12 Loss: 5.4627
+[2025-09-06 01:39:53] [Rank 0] Group 13 Loss: 5.5056
+[2025-09-06 01:39:53] [Rank 0] Group 14 Loss: 5.5311
+[2025-09-06 01:39:53] [Rank 0] Group 15 Loss: 5.4772
+[2025-09-06 01:39:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:39:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:39:53] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:39:53] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:39:53] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:39:53] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:39:53] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-06 01:39:53] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:39:53] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:39:53] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:39:53] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 01:39:53] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 01:39:53] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-06 01:39:53] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-06 01:39:53] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 01:39:53] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 01:39:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:39:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:39:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:39:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:39:55] [Rank 0] step:7001/10000 train_time:293871ms step_avg:41.98ms
+[2025-09-06 01:39:55] [Rank 0] step:7021/10000 train_time:294545ms step_avg:41.95ms
+[2025-09-06 01:39:56] [Rank 0] step:7041/10000 train_time:295281ms step_avg:41.94ms
+[2025-09-06 01:39:57] [Rank 0] step:7061/10000 train_time:296016ms step_avg:41.92ms
+[2025-09-06 01:39:58] [Rank 0] step:7081/10000 train_time:296751ms step_avg:41.91ms
+[2025-09-06 01:39:58] [Rank 0] step:7101/10000 train_time:297487ms step_avg:41.89ms
+[2025-09-06 01:39:59] [Rank 0] step:7121/10000 train_time:298222ms step_avg:41.88ms
+[2025-09-06 01:40:00] [Rank 0] step:7141/10000 train_time:298958ms step_avg:41.86ms
+[2025-09-06 01:40:01] [Rank 0] step:7161/10000 train_time:299694ms step_avg:41.85ms
+[2025-09-06 01:40:01] [Rank 0] step:7181/10000 train_time:300429ms step_avg:41.84ms
+[2025-09-06 01:40:02] [Rank 0] step:7201/10000 train_time:301166ms step_avg:41.82ms
+[2025-09-06 01:40:03] [Rank 0] step:7221/10000 train_time:302034ms step_avg:41.83ms
+[2025-09-06 01:40:04] [Rank 0] step:7241/10000 train_time:302770ms step_avg:41.81ms
+[2025-09-06 01:40:04] [Rank 0] step:7261/10000 train_time:303506ms step_avg:41.80ms
+[2025-09-06 01:40:05] [Rank 0] step:7281/10000 train_time:304241ms step_avg:41.79ms
+[2025-09-06 01:40:06] [Rank 0] step:7301/10000 train_time:305117ms step_avg:41.79ms
+[2025-09-06 01:40:07] [Rank 0] step:7321/10000 train_time:305853ms step_avg:41.78ms
+[2025-09-06 01:40:07] [Rank 0] step:7341/10000 train_time:306589ms step_avg:41.76ms
+[2025-09-06 01:40:08] [Rank 0] step:7361/10000 train_time:307324ms step_avg:41.75ms
+[2025-09-06 01:40:09] [Rank 0] step:7381/10000 train_time:308059ms step_avg:41.74ms
+[2025-09-06 01:40:10] [Rank 0] step:7401/10000 train_time:308795ms step_avg:41.72ms
+[2025-09-06 01:40:10] [Rank 0] step:7421/10000 train_time:309531ms step_avg:41.71ms
+[2025-09-06 01:40:11] [Rank 0] step:7441/10000 train_time:310266ms step_avg:41.70ms
+[2025-09-06 01:40:12] [Rank 0] step:7461/10000 train_time:311001ms step_avg:41.68ms
+[2025-09-06 01:40:13] [Rank 0] step:7481/10000 train_time:311736ms step_avg:41.67ms
+[2025-09-06 01:40:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
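In the evaluation block that follows, Total FTA (Unweighted) is the plain mean of the 16 per-group FTAs: the step-7500 group values average to 0.29625, matching the logged 0.2962/0.2963 up to rounding. The weighted variant presumably weights each group by its sample count, which coincides with the unweighted mean when the 1600 fixed-eval samples are split evenly. A sketch of both aggregations (the even 100-per-group split is an assumption; only the 1600-sample total appears in this log):

    group_fta = [1.0000, 1.0000, 0.3100, 0.1700, 0.2500, 0.2400, 0.2700, 0.1300,
                 0.2200, 0.1400, 0.2000, 0.1900, 0.1700, 0.2000, 0.1400, 0.1100]
    counts = [100] * 16  # assumed even split of the 1600 fixed-eval samples
    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * n for f, n in zip(group_fta, counts)) / sum(counts)
    print(f"{unweighted:.4f} {weighted:.4f}")  # equal counts make the two coincide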
+[2025-09-06 01:40:14] [Rank 0] PRINT: step:7500/10000 train_loss:2.2468 val_loss:2.2294 train_time:312552ms step_avg:41.67ms
+[2025-09-06 01:40:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:40:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:41:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:41:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:41:34] [Rank 0] Total Loss: 4.7608
+[2025-09-06 01:41:34] [Rank 0] Total FTA (Unweighted): 0.2962
+[2025-09-06 01:41:34] [Rank 0] Total FTA (Weighted): 0.2963
+[2025-09-06 01:41:34] [Rank 0] Group 0 Loss: 3.4009
+[2025-09-06 01:41:34] [Rank 0] Group 1 Loss: 3.4028
+[2025-09-06 01:41:34] [Rank 0] Group 2 Loss: 3.3733
+[2025-09-06 01:41:34] [Rank 0] Group 3 Loss: 3.7999
+[2025-09-06 01:41:34] [Rank 0] Group 4 Loss: 4.1449
+[2025-09-06 01:41:34] [Rank 0] Group 5 Loss: 4.5852
+[2025-09-06 01:41:34] [Rank 0] Group 6 Loss: 4.8678
+[2025-09-06 01:41:34] [Rank 0] Group 7 Loss: 5.0258
+[2025-09-06 01:41:34] [Rank 0] Group 8 Loss: 5.3115
+[2025-09-06 01:41:34] [Rank 0] Group 9 Loss: 5.4183
+[2025-09-06 01:41:34] [Rank 0] Group 10 Loss: 5.4909
+[2025-09-06 01:41:34] [Rank 0] Group 11 Loss: 5.5325
+[2025-09-06 01:41:34] [Rank 0] Group 12 Loss: 5.4284
+[2025-09-06 01:41:34] [Rank 0] Group 13 Loss: 5.4652
+[2025-09-06 01:41:34] [Rank 0] Group 14 Loss: 5.4784
+[2025-09-06 01:41:34] [Rank 0] Group 15 Loss: 5.4474
+[2025-09-06 01:41:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:41:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:41:34] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 01:41:34] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:41:34] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:41:34] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:41:34] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-06 01:41:34] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:41:34] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:41:34] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:41:34] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 01:41:34] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-06 01:41:34] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-06 01:41:34] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-06 01:41:34] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-06 01:41:34] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 01:41:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:41:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:41:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:41:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:41:36] [Rank 0] step:7501/10000 train_time:312560ms step_avg:41.67ms
+[2025-09-06 01:41:37] [Rank 0] step:7521/10000 train_time:313226ms step_avg:41.65ms
+[2025-09-06 01:41:37] [Rank 0] step:7541/10000 train_time:313962ms step_avg:41.63ms
+[2025-09-06 01:41:38] [Rank 0] step:7561/10000 train_time:314697ms step_avg:41.62ms
+[2025-09-06 01:41:39] [Rank 0] step:7581/10000 train_time:315432ms step_avg:41.61ms
+[2025-09-06 01:41:39] [Rank 0] step:7601/10000 train_time:316168ms step_avg:41.60ms
+[2025-09-06 01:41:40] [Rank 0] step:7621/10000 train_time:316903ms step_avg:41.58ms
+[2025-09-06 01:41:41] [Rank 0] step:7641/10000 train_time:317638ms step_avg:41.57ms
+[2025-09-06 01:41:42] [Rank 0] step:7661/10000 train_time:318567ms step_avg:41.58ms
+[2025-09-06 01:41:43] [Rank 0] step:7681/10000 train_time:319303ms step_avg:41.57ms
+[2025-09-06 01:41:43] [Rank 0] step:7701/10000 train_time:320038ms step_avg:41.56ms
+[2025-09-06 01:41:44] [Rank 0] step:7721/10000 train_time:320773ms step_avg:41.55ms
+[2025-09-06 01:41:45] [Rank 0] step:7741/10000 train_time:321509ms step_avg:41.53ms
+[2025-09-06 01:41:46] [Rank 0] step:7761/10000 train_time:322244ms step_avg:41.52ms
+[2025-09-06 01:41:46] [Rank 0] step:7781/10000 train_time:322979ms step_avg:41.51ms
+[2025-09-06 01:41:47] [Rank 0] step:7801/10000 train_time:323715ms step_avg:41.50ms
+[2025-09-06 01:41:48] [Rank 0] step:7821/10000 train_time:324451ms step_avg:41.48ms
+[2025-09-06 01:41:49] [Rank 0] step:7841/10000 train_time:325186ms step_avg:41.47ms
+[2025-09-06 01:41:49] [Rank 0] step:7861/10000 train_time:325922ms step_avg:41.46ms
+[2025-09-06 01:41:50] [Rank 0] step:7881/10000 train_time:326657ms step_avg:41.45ms
+[2025-09-06 01:41:51] [Rank 0] step:7901/10000 train_time:327393ms step_avg:41.44ms
+[2025-09-06 01:41:51] [Rank 0] step:7921/10000 train_time:328129ms step_avg:41.43ms
+[2025-09-06 01:41:52] [Rank 0] step:7941/10000 train_time:328864ms step_avg:41.41ms
+[2025-09-06 01:41:53] [Rank 0] step:7961/10000 train_time:329599ms step_avg:41.40ms
+[2025-09-06 01:41:54] [Rank 0] step:7981/10000 train_time:330334ms step_avg:41.39ms
+[2025-09-06 01:41:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 01:41:55] [Rank 0] PRINT: step:8000/10000 train_loss:2.2302 val_loss:2.2135 train_time:331151ms step_avg:41.39ms
+[2025-09-06 01:41:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:41:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:43:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:43:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:43:16] [Rank 0] Total Loss: 4.7178
+[2025-09-06 01:43:16] [Rank 0] Total FTA (Unweighted): 0.3019
+[2025-09-06 01:43:16] [Rank 0] Total FTA (Weighted): 0.3019
+[2025-09-06 01:43:16] [Rank 0] Group 0 Loss: 3.3592
+[2025-09-06 01:43:16] [Rank 0] Group 1 Loss: 3.3129
+[2025-09-06 01:43:16] [Rank 0] Group 2 Loss: 3.3086
+[2025-09-06 01:43:16] [Rank 0] Group 3 Loss: 3.7676
+[2025-09-06 01:43:16] [Rank 0] Group 4 Loss: 4.0763
+[2025-09-06 01:43:16] [Rank 0] Group 5 Loss: 4.5467
+[2025-09-06 01:43:16] [Rank 0] Group 6 Loss: 4.8249
+[2025-09-06 01:43:16] [Rank 0] Group 7 Loss: 4.9861
+[2025-09-06 01:43:16] [Rank 0] Group 8 Loss: 5.2740
+[2025-09-06 01:43:16] [Rank 0] Group 9 Loss: 5.3897
+[2025-09-06 01:43:16] [Rank 0] Group 10 Loss: 5.4584
+[2025-09-06 01:43:16] [Rank 0] Group 11 Loss: 5.4988
+[2025-09-06 01:43:16] [Rank 0] Group 12 Loss: 5.3891
+[2025-09-06 01:43:16] [Rank 0] Group 13 Loss: 5.4219
+[2025-09-06 01:43:16] [Rank 0] Group 14 Loss: 5.4561
+[2025-09-06 01:43:16] [Rank 0] Group 15 Loss: 5.4148
+[2025-09-06 01:43:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:43:16] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:43:16] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 01:43:16] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:43:16] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:43:16] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:43:16] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-06 01:43:16] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:43:16] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:43:16] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:43:16] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 01:43:16] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 01:43:16] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-06 01:43:16] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-06 01:43:16] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 01:43:16] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-06 01:43:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:43:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:43:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:43:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:43:17] [Rank 0] step:8001/10000 train_time:331159ms step_avg:41.39ms
+[2025-09-06 01:43:19] [Rank 0] step:8021/10000 train_time:332450ms step_avg:41.45ms
+[2025-09-06 01:43:19] [Rank 0] step:8041/10000 train_time:333185ms step_avg:41.44ms
+[2025-09-06 01:43:20] [Rank 0] step:8061/10000 train_time:333921ms step_avg:41.42ms
+[2025-09-06 01:43:21] [Rank 0] step:8081/10000 train_time:334657ms step_avg:41.41ms
+[2025-09-06 01:43:21] [Rank 0] step:8101/10000 train_time:335392ms step_avg:41.40ms
+[2025-09-06 01:43:22] [Rank 0] step:8121/10000 train_time:336127ms step_avg:41.39ms
01:43:22] [Rank 0] step:8121/10000 train_time:336127ms step_avg:41.39ms +[2025-09-06 01:43:23] [Rank 0] step:8141/10000 train_time:336862ms step_avg:41.38ms +[2025-09-06 01:43:23] [Rank 0] step:8141/10000 train_time:336862ms step_avg:41.38ms +[2025-09-06 01:43:24] [Rank 0] step:8161/10000 train_time:337597ms step_avg:41.37ms +[2025-09-06 01:43:24] [Rank 0] step:8161/10000 train_time:337597ms step_avg:41.37ms +[2025-09-06 01:43:24] [Rank 0] step:8181/10000 train_time:338333ms step_avg:41.36ms +[2025-09-06 01:43:24] [Rank 0] step:8181/10000 train_time:338333ms step_avg:41.36ms +[2025-09-06 01:43:25] [Rank 0] step:8201/10000 train_time:339069ms step_avg:41.34ms +[2025-09-06 01:43:25] [Rank 0] step:8201/10000 train_time:339069ms step_avg:41.34ms +[2025-09-06 01:43:26] [Rank 0] step:8221/10000 train_time:339804ms step_avg:41.33ms +[2025-09-06 01:43:26] [Rank 0] step:8221/10000 train_time:339804ms step_avg:41.33ms +[2025-09-06 01:43:27] [Rank 0] step:8241/10000 train_time:340539ms step_avg:41.32ms +[2025-09-06 01:43:27] [Rank 0] step:8241/10000 train_time:340539ms step_avg:41.32ms +[2025-09-06 01:43:27] [Rank 0] step:8261/10000 train_time:341275ms step_avg:41.31ms +[2025-09-06 01:43:27] [Rank 0] step:8261/10000 train_time:341275ms step_avg:41.31ms +[2025-09-06 01:43:28] [Rank 0] step:8281/10000 train_time:342010ms step_avg:41.30ms +[2025-09-06 01:43:28] [Rank 0] step:8281/10000 train_time:342010ms step_avg:41.30ms +[2025-09-06 01:43:29] [Rank 0] step:8301/10000 train_time:342745ms step_avg:41.29ms +[2025-09-06 01:43:29] [Rank 0] step:8301/10000 train_time:342745ms step_avg:41.29ms +[2025-09-06 01:43:30] [Rank 0] step:8321/10000 train_time:343480ms step_avg:41.28ms +[2025-09-06 01:43:30] [Rank 0] step:8321/10000 train_time:343480ms step_avg:41.28ms +[2025-09-06 01:43:30] [Rank 0] step:8341/10000 train_time:344215ms step_avg:41.27ms +[2025-09-06 01:43:30] [Rank 0] step:8341/10000 train_time:344215ms step_avg:41.27ms +[2025-09-06 01:43:31] [Rank 0] step:8361/10000 train_time:344951ms step_avg:41.26ms +[2025-09-06 01:43:31] [Rank 0] step:8361/10000 train_time:344951ms step_avg:41.26ms +[2025-09-06 01:43:32] [Rank 0] step:8381/10000 train_time:345686ms step_avg:41.25ms +[2025-09-06 01:43:32] [Rank 0] step:8381/10000 train_time:345686ms step_avg:41.25ms +[2025-09-06 01:43:33] [Rank 0] step:8401/10000 train_time:346421ms step_avg:41.24ms +[2025-09-06 01:43:33] [Rank 0] step:8401/10000 train_time:346421ms step_avg:41.24ms +[2025-09-06 01:43:33] [Rank 0] step:8421/10000 train_time:347156ms step_avg:41.23ms +[2025-09-06 01:43:33] [Rank 0] step:8421/10000 train_time:347156ms step_avg:41.23ms +[2025-09-06 01:43:34] [Rank 0] step:8441/10000 train_time:347892ms step_avg:41.21ms +[2025-09-06 01:43:34] [Rank 0] step:8441/10000 train_time:347892ms step_avg:41.21ms +[2025-09-06 01:43:35] [Rank 0] step:8461/10000 train_time:348627ms step_avg:41.20ms +[2025-09-06 01:43:35] [Rank 0] step:8461/10000 train_time:348627ms step_avg:41.20ms +[2025-09-06 01:43:35] [Rank 0] step:8481/10000 train_time:349362ms step_avg:41.19ms +[2025-09-06 01:43:35] [Rank 0] step:8481/10000 train_time:349362ms step_avg:41.19ms +[2025-09-06 01:43:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:43:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
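The divisibility warning above is simple arithmetic: 491520 validation tokens split into 65536-token evaluation batches leaves a remainder, so the tail of the validation stream is skipped each pass. A minimal sketch of the kind of check that would emit this message (variable names are assumptions, not taken from the script; 65536 is plausibly world_size * val_seq_len for this run):

    val_tokens = 491520       # hyperparameters.val_tokens in config.json
    val_batch_size = 65536    # assumed composition: world_size * val_seq_len
    full_batches, remainder = divmod(val_tokens, val_batch_size)  # (7, 32768)
    if remainder != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")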
+[2025-09-06 01:43:37] [Rank 0] PRINT: step:8500/10000 train_loss:2.2165 val_loss:2.2008 train_time:350179ms step_avg:41.20ms
+[2025-09-06 01:43:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:43:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:44:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:44:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:44:57] [Rank 0] Total Loss: 4.7165
+[2025-09-06 01:44:57] [Rank 0] Total FTA (Unweighted): 0.3087
+[2025-09-06 01:44:57] [Rank 0] Total FTA (Weighted): 0.3088
+[2025-09-06 01:44:57] [Rank 0] Group 0 Loss: 3.3875
+[2025-09-06 01:44:57] [Rank 0] Group 1 Loss: 3.2883
+[2025-09-06 01:44:57] [Rank 0] Group 2 Loss: 3.3359
+[2025-09-06 01:44:57] [Rank 0] Group 3 Loss: 3.7788
+[2025-09-06 01:44:57] [Rank 0] Group 4 Loss: 4.0919
+[2025-09-06 01:44:57] [Rank 0] Group 5 Loss: 4.5301
+[2025-09-06 01:44:57] [Rank 0] Group 6 Loss: 4.8085
+[2025-09-06 01:44:57] [Rank 0] Group 7 Loss: 4.9703
+[2025-09-06 01:44:57] [Rank 0] Group 8 Loss: 5.2677
+[2025-09-06 01:44:57] [Rank 0] Group 9 Loss: 5.3808
+[2025-09-06 01:44:57] [Rank 0] Group 10 Loss: 5.4559
+[2025-09-06 01:44:57] [Rank 0] Group 11 Loss: 5.4884
+[2025-09-06 01:44:57] [Rank 0] Group 12 Loss: 5.3898
+[2025-09-06 01:44:58] [Rank 0] Group 13 Loss: 5.4288
+[2025-09-06 01:44:58] [Rank 0] Group 14 Loss: 5.4529
+[2025-09-06 01:44:58] [Rank 0] Group 15 Loss: 5.4082
+[2025-09-06 01:44:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:44:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:44:58] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 01:44:58] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:44:58] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:44:58] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:44:58] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 01:44:58] [Rank 0] Group 7 FTA: 0.1400
+[2025-09-06 01:44:58] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:44:58] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:44:58] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 01:44:58] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-06 01:44:58] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-06 01:44:58] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-06 01:44:58] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 01:44:58] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-06 01:44:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:44:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:44:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:44:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:44:59] [Rank 0] step:8501/10000 train_time:350189ms step_avg:41.19ms
+[2025-09-06 01:45:00] [Rank 0] step:8521/10000 train_time:350859ms step_avg:41.18ms
+[2025-09-06 01:45:00] [Rank 0] step:8541/10000 train_time:351594ms step_avg:41.17ms
+[2025-09-06 01:45:01] [Rank 0] step:8561/10000 train_time:352330ms step_avg:41.16ms
+[2025-09-06 01:45:02] [Rank 0] step:8581/10000 train_time:353065ms step_avg:41.15ms
+[2025-09-06 01:45:03] [Rank 0] step:8601/10000 train_time:353838ms step_avg:41.14ms
+[2025-09-06 01:45:03] [Rank 0] step:8621/10000 train_time:354574ms step_avg:41.13ms
+[2025-09-06 01:45:04] [Rank 0] step:8641/10000 train_time:355309ms step_avg:41.12ms
+[2025-09-06 01:45:05] [Rank 0] step:8661/10000 train_time:356045ms step_avg:41.11ms
+[2025-09-06 01:45:06] [Rank 0] step:8681/10000 train_time:356780ms step_avg:41.10ms
+[2025-09-06 01:45:06] [Rank 0] step:8701/10000 train_time:357516ms step_avg:41.09ms
+[2025-09-06 01:45:07] [Rank 0] step:8721/10000 train_time:358252ms step_avg:41.08ms
+[2025-09-06 01:45:08] [Rank 0] step:8741/10000 train_time:358987ms step_avg:41.07ms
+[2025-09-06 01:45:09] [Rank 0] step:8761/10000 train_time:359722ms step_avg:41.06ms
+[2025-09-06 01:45:09] [Rank 0] step:8781/10000 train_time:360458ms step_avg:41.05ms
+[2025-09-06 01:45:10] [Rank 0] step:8801/10000 train_time:361194ms step_avg:41.04ms
+[2025-09-06 01:45:11] [Rank 0] step:8821/10000 train_time:361929ms step_avg:41.03ms
+[2025-09-06 01:45:12] [Rank 0] step:8841/10000 train_time:363276ms step_avg:41.09ms
+[2025-09-06 01:45:13] [Rank 0] step:8861/10000 train_time:364012ms step_avg:41.08ms
+[2025-09-06 01:45:14] [Rank 0] step:8881/10000 train_time:364747ms step_avg:41.07ms
+[2025-09-06 01:45:14] [Rank 0] step:8901/10000 train_time:365482ms step_avg:41.06ms
+[2025-09-06 01:45:15] [Rank 0] step:8921/10000 train_time:366217ms step_avg:41.05ms
+[2025-09-06 01:45:16] [Rank 0] step:8941/10000 train_time:366952ms step_avg:41.04ms
+[2025-09-06 01:45:17] [Rank 0] step:8961/10000 train_time:367688ms step_avg:41.03ms
+[2025-09-06 01:45:17] [Rank 0] step:8981/10000 train_time:368422ms step_avg:41.02ms
+[2025-09-06 01:45:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
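The step_avg field in these progress lines is consistent with cumulative wall-clock train_time divided by the current step index, which is why it drifts down slowly once the early, slower steps are amortized. Recomputing it from the step:8500 line above:

    train_time_ms = 350179  # from 'step:8500/10000 ... train_time:350179ms'
    step = 8500
    print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:41.20ms, matching the log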
+[2025-09-06 01:45:18] [Rank 0] PRINT: step:9000/10000 train_loss:2.2027 val_loss:2.1887 train_time:369238ms step_avg:41.03ms
+[2025-09-06 01:45:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:45:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:46:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:46:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:46:39] [Rank 0] Total Loss: 4.7252
+[2025-09-06 01:46:39] [Rank 0] Total FTA (Unweighted): 0.3075
+[2025-09-06 01:46:39] [Rank 0] Total FTA (Weighted): 0.3075
+[2025-09-06 01:46:39] [Rank 0] Group 0 Loss: 3.4259
+[2025-09-06 01:46:39] [Rank 0] Group 1 Loss: 3.3406
+[2025-09-06 01:46:39] [Rank 0] Group 2 Loss: 3.3514
+[2025-09-06 01:46:39] [Rank 0] Group 3 Loss: 3.7795
+[2025-09-06 01:46:39] [Rank 0] Group 4 Loss: 4.0916
+[2025-09-06 01:46:39] [Rank 0] Group 5 Loss: 4.5366
+[2025-09-06 01:46:39] [Rank 0] Group 6 Loss: 4.8161
+[2025-09-06 01:46:39] [Rank 0] Group 7 Loss: 4.9730
+[2025-09-06 01:46:39] [Rank 0] Group 8 Loss: 5.2933
+[2025-09-06 01:46:39] [Rank 0] Group 9 Loss: 5.3895
+[2025-09-06 01:46:39] [Rank 0] Group 10 Loss: 5.4487
+[2025-09-06 01:46:39] [Rank 0] Group 11 Loss: 5.4913
+[2025-09-06 01:46:39] [Rank 0] Group 12 Loss: 5.3834
+[2025-09-06 01:46:39] [Rank 0] Group 13 Loss: 5.4265
+[2025-09-06 01:46:39] [Rank 0] Group 14 Loss: 5.4551
+[2025-09-06 01:46:39] [Rank 0] Group 15 Loss: 5.4006
+[2025-09-06 01:46:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:46:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:46:39] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 01:46:39] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:46:40] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:46:40] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 01:46:40] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 01:46:40] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 01:46:40] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:46:40] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 01:46:40] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 01:46:40] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-06 01:46:40] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-06 01:46:40] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-06 01:46:40] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-06 01:46:40] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-06 01:46:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:46:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:46:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:46:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:46:41] [Rank 0] step:9001/10000 train_time:369247ms step_avg:41.02ms
+[2025-09-06 01:46:42] [Rank 0] step:9021/10000 train_time:369916ms step_avg:41.01ms
+[2025-09-06 01:46:43] [Rank 0] step:9041/10000 train_time:370651ms step_avg:41.00ms
+[2025-09-06 01:46:44] [Rank 0] step:9061/10000 train_time:371387ms step_avg:40.99ms
+[2025-09-06 01:46:44] [Rank 0] step:9081/10000 train_time:372123ms step_avg:40.98ms
+[2025-09-06 01:46:45] [Rank 0] step:9101/10000 train_time:372859ms step_avg:40.97ms
+[2025-09-06 01:46:46] [Rank 0] step:9121/10000 train_time:373594ms step_avg:40.96ms
+[2025-09-06 01:46:47] [Rank 0] step:9141/10000 train_time:374330ms step_avg:40.95ms
+[2025-09-06 01:46:47] [Rank 0] step:9161/10000 train_time:375065ms step_avg:40.94ms
+[2025-09-06 01:46:48] [Rank 0] step:9181/10000 train_time:375801ms step_avg:40.93ms
+[2025-09-06 01:46:49] [Rank 0] step:9201/10000 train_time:376536ms step_avg:40.92ms
+[2025-09-06 01:46:50] [Rank 0] step:9221/10000 train_time:377272ms step_avg:40.91ms
+[2025-09-06 01:46:50] [Rank 0] step:9241/10000 train_time:378008ms step_avg:40.91ms
+[2025-09-06 01:46:51] [Rank 0] step:9261/10000 train_time:378743ms step_avg:40.90ms
+[2025-09-06 01:46:52] [Rank 0] step:9281/10000 train_time:379479ms step_avg:40.89ms
+[2025-09-06 01:46:53] [Rank 0] step:9301/10000 train_time:380214ms step_avg:40.88ms
+[2025-09-06 01:46:53] [Rank 0] step:9321/10000 train_time:380950ms step_avg:40.87ms
+[2025-09-06 01:46:54] [Rank 0] step:9341/10000 train_time:381686ms step_avg:40.86ms
+[2025-09-06 01:46:55] [Rank 0] step:9361/10000 train_time:382422ms step_avg:40.85ms
+[2025-09-06 01:46:55] [Rank 0] step:9381/10000 train_time:383157ms step_avg:40.84ms
+[2025-09-06 01:46:56] [Rank 0] step:9401/10000 train_time:383893ms step_avg:40.84ms
+[2025-09-06 01:46:57] [Rank 0] step:9421/10000 train_time:384629ms step_avg:40.83ms
+[2025-09-06 01:46:58] [Rank 0] step:9441/10000 train_time:385365ms step_avg:40.82ms
+[2025-09-06 01:46:58] [Rank 0] step:9461/10000 train_time:386100ms step_avg:40.81ms
+[2025-09-06 01:46:59] [Rank 0] step:9481/10000 train_time:386835ms step_avg:40.80ms
+[2025-09-06 01:47:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
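"Total FTA (Unweighted)" in these reports is consistent with a plain mean of the 16 per-group first-token accuracies, and the weighted variant with a sample-count-weighted mean; with per_group_k=100 the fixed-eval set is balanced (16 groups x 100 = 1600 samples), so the two coincide up to rounding (0.3087 vs 0.3088 at step 8500). A sketch of that reading, using the step-9000 numbers above (the weighting scheme is an assumption inferred from the logs, not shown in this excerpt):

    group_fta = [1.00, 1.00, 0.39, 0.17, 0.25, 0.24, 0.29, 0.13,
                 0.22, 0.14, 0.20, 0.19, 0.19, 0.21, 0.16, 0.14]  # step 9000
    group_n = [100] * 16  # per_group_k = 100 in config.json
    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * n for f, n in zip(group_fta, group_n)) / sum(group_n)
    print(f"{unweighted:.4f} {weighted:.4f}")  # 0.3075 0.3075, matching the log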
+[2025-09-06 01:47:00] [Rank 0] PRINT: step:9500/10000 train_loss:2.1911 val_loss:2.1785 train_time:387651ms step_avg:40.81ms
+[2025-09-06 01:47:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:47:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:48:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:48:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:48:21] [Rank 0] Total Loss: 4.6944
+[2025-09-06 01:48:21] [Rank 0] Total FTA (Unweighted): 0.3150
+[2025-09-06 01:48:21] [Rank 0] Total FTA (Weighted): 0.3150
+[2025-09-06 01:48:21] [Rank 0] Group 0 Loss: 3.4228
+[2025-09-06 01:48:21] [Rank 0] Group 1 Loss: 3.3423
+[2025-09-06 01:48:21] [Rank 0] Group 2 Loss: 3.3084
+[2025-09-06 01:48:21] [Rank 0] Group 3 Loss: 3.7669
+[2025-09-06 01:48:21] [Rank 0] Group 4 Loss: 4.0462
+[2025-09-06 01:48:21] [Rank 0] Group 5 Loss: 4.4972
+[2025-09-06 01:48:21] [Rank 0] Group 6 Loss: 4.7836
+[2025-09-06 01:48:21] [Rank 0] Group 7 Loss: 4.9398
+[2025-09-06 01:48:21] [Rank 0] Group 8 Loss: 5.2410
+[2025-09-06 01:48:21] [Rank 0] Group 9 Loss: 5.3510
+[2025-09-06 01:48:21] [Rank 0] Group 10 Loss: 5.4207
+[2025-09-06 01:48:21] [Rank 0] Group 11 Loss: 5.4584
+[2025-09-06 01:48:21] [Rank 0] Group 12 Loss: 5.3462
+[2025-09-06 01:48:21] [Rank 0] Group 13 Loss: 5.3929
+[2025-09-06 01:48:21] [Rank 0] Group 14 Loss: 5.4236
+[2025-09-06 01:48:21] [Rank 0] Group 15 Loss: 5.3689
+[2025-09-06 01:48:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:48:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:48:21] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 01:48:21] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:48:21] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:48:21] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-06 01:48:21] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 01:48:21] [Rank 0] Group 7 FTA: 0.1400
+[2025-09-06 01:48:21] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:48:21] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-06 01:48:21] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-06 01:48:21] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-06 01:48:21] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-06 01:48:21] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-06 01:48:21] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-06 01:48:21] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-06 01:48:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:48:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:48:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:48:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:48:23] [Rank 0] step:9501/10000 train_time:387660ms step_avg:40.80ms
+[2025-09-06 01:48:23] [Rank 0] step:9521/10000 train_time:388332ms step_avg:40.79ms
+[2025-09-06 01:48:24] [Rank 0] step:9541/10000 train_time:389067ms step_avg:40.78ms
+[2025-09-06 01:48:25] [Rank 0] step:9561/10000 train_time:389803ms step_avg:40.77ms
+[2025-09-06 01:48:26] [Rank 0] step:9581/10000 train_time:390538ms step_avg:40.76ms
+[2025-09-06 01:48:26] [Rank 0] step:9601/10000 train_time:391274ms step_avg:40.75ms
+[2025-09-06 01:48:27] [Rank 0] step:9621/10000 train_time:392010ms step_avg:40.75ms
+[2025-09-06 01:48:28] [Rank 0] step:9641/10000 train_time:392745ms step_avg:40.74ms
+[2025-09-06 01:48:29] [Rank 0] step:9661/10000 train_time:393758ms step_avg:40.76ms
+[2025-09-06 01:48:30] [Rank 0] step:9681/10000 train_time:394629ms step_avg:40.76ms
+[2025-09-06 01:48:30] [Rank 0] step:9701/10000 train_time:395364ms step_avg:40.75ms
+[2025-09-06 01:48:31] [Rank 0] step:9721/10000 train_time:396099ms step_avg:40.75ms
+[2025-09-06 01:48:32] [Rank 0] step:9741/10000 train_time:396976ms step_avg:40.75ms
+[2025-09-06 01:48:33] [Rank 0] step:9761/10000 train_time:397711ms step_avg:40.74ms
+[2025-09-06 01:48:33] [Rank 0] step:9781/10000 train_time:398447ms step_avg:40.74ms
+[2025-09-06 01:48:34] [Rank 0] step:9801/10000 train_time:399183ms step_avg:40.73ms
+[2025-09-06 01:48:35] [Rank 0] step:9821/10000 train_time:399919ms step_avg:40.72ms
+[2025-09-06 01:48:36] [Rank 0] step:9841/10000 train_time:400654ms step_avg:40.71ms
+[2025-09-06 01:48:36] [Rank 0] step:9861/10000 train_time:401390ms step_avg:40.70ms
+[2025-09-06 01:48:37] [Rank 0] step:9881/10000 train_time:402125ms step_avg:40.70ms
+[2025-09-06 01:48:38] [Rank 0] step:9901/10000 train_time:402861ms step_avg:40.69ms
+[2025-09-06 01:48:39] [Rank 0] step:9921/10000 train_time:403596ms step_avg:40.68ms
+[2025-09-06 01:48:39] [Rank 0] step:9941/10000 train_time:404332ms step_avg:40.67ms
+[2025-09-06 01:48:40] [Rank 0] step:9961/10000 train_time:405068ms step_avg:40.67ms
+[2025-09-06 01:48:41] [Rank 0] step:9981/10000 train_time:405803ms step_avg:40.66ms
+[2025-09-06 01:48:41] [Rank 0] step:10000/10000 train_time:406502ms step_avg:40.65ms
+[2025-09-06 01:48:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
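The "[✓] ... curve updated and saved" lines show the script re-rendering its diagnostic plots after every detailed evaluation. The exact plotting code is not included in this excerpt; a minimal sketch of the pattern (illustrative names, consistent with the matplotlib import in the script source further below) might look like:

    import matplotlib.pyplot as plt

    def save_per_class_loss_curves(history, out_path):
        # history: {group_id: [(step, loss), ...]} accumulated across evaluations
        fig, ax = plt.subplots()
        for gid, pts in sorted(history.items()):
            steps, losses = zip(*pts)
            ax.plot(steps, losses, label=f"Group {gid}")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval loss")
        ax.legend(fontsize=6)
        fig.savefig(out_path)
        plt.close(fig)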
+[2025-09-06 01:48:42] [Rank 0] PRINT: step:10000/10000 train_loss:2.1823 val_loss:2.1705 train_time:406624ms step_avg:40.66ms
+[2025-09-06 01:48:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 01:48:42] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 01:50:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 01:50:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 01:50:02] [Rank 0] Total Loss: 4.7283
+[2025-09-06 01:50:02] [Rank 0] Total FTA (Unweighted): 0.3144
+[2025-09-06 01:50:02] [Rank 0] Total FTA (Weighted): 0.3144
+[2025-09-06 01:50:02] [Rank 0] Group 0 Loss: 3.4744
+[2025-09-06 01:50:02] [Rank 0] Group 1 Loss: 3.3555
+[2025-09-06 01:50:02] [Rank 0] Group 2 Loss: 3.3454
+[2025-09-06 01:50:02] [Rank 0] Group 3 Loss: 3.8362
+[2025-09-06 01:50:02] [Rank 0] Group 4 Loss: 4.0899
+[2025-09-06 01:50:02] [Rank 0] Group 5 Loss: 4.5177
+[2025-09-06 01:50:02] [Rank 0] Group 6 Loss: 4.8106
+[2025-09-06 01:50:02] [Rank 0] Group 7 Loss: 4.9678
+[2025-09-06 01:50:02] [Rank 0] Group 8 Loss: 5.2845
+[2025-09-06 01:50:02] [Rank 0] Group 9 Loss: 5.3840
+[2025-09-06 01:50:02] [Rank 0] Group 10 Loss: 5.4409
+[2025-09-06 01:50:02] [Rank 0] Group 11 Loss: 5.4899
+[2025-09-06 01:50:02] [Rank 0] Group 12 Loss: 5.3932
+[2025-09-06 01:50:02] [Rank 0] Group 13 Loss: 5.4133
+[2025-09-06 01:50:02] [Rank 0] Group 14 Loss: 5.4570
+[2025-09-06 01:50:02] [Rank 0] Group 15 Loss: 5.3925
+[2025-09-06 01:50:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 01:50:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 01:50:02] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-06 01:50:03] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 01:50:03] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 01:50:03] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-06 01:50:03] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-06 01:50:03] [Rank 0] Group 7 FTA: 0.1400
+[2025-09-06 01:50:03] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 01:50:03] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-06 01:50:03] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-06 01:50:03] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-06 01:50:03] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-06 01:50:03] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-06 01:50:03] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-06 01:50:03] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-06 01:50:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_loss_curves.png
+[2025-09-06 01:50:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/per_class_acc_curves.png
+[2025-09-06 01:50:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_loss_curve.png
+[2025-09-06 01:50:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_45/total_acc_curve.png
+[2025-09-06 01:50:05] [Rank 0] step:10001/10000 train_time:406633ms step_avg:40.66ms
+[2025-09-06 01:50:05] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 01:50:05 2025 ---
+[2025-09-06 01:50:05] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..f34ad15eaea4e87a59223d5fe0e6f5c85c405dab
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/config.json
@@ -0,0 +1,29 @@
+{ + "cli_args": { + "unet": false,
+ "seed": 46, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.08, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "3ace4c18-7488-4964-9435-eb227c72439f", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 
894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 
425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 
617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 
898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]}
\ No newline at end of file
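fixed_eval_indices.json maps each group id (a string key, "0" through "15") to the dataset indices of that group's held-fixed evaluation samples; the blob hash (a82377...) is identical across the runs in this sweep, so every seed evaluates on the same fixed set. A quick consistency check one could run against the file (path as in this diff; the layout follows from the JSON shown above):

    import json

    with open("logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/fixed_eval_indices.json") as f:
        idx = json.load(f)

    assert sorted(int(k) for k in idx) == list(range(16))  # groups 0..15
    assert all(len(v) == 100 for v in idx.values())        # per_group_k = 100
    print(sum(len(v) for v in idx.values()))               # 1600, as in "Fixed-eval set loaded with 1600 samples."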
[Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.08, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-06 01:50:28] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-06 01:50:28] [Rank 0] PRINT: Using fixed seed: 46
+[2025-09-06 01:50:28] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46
+[2025-09-06 01:50:28] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
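+    # Illustrative only (not used by this run): a matching shard writer would
+    # emit a 256-int32 header (magic=20240520, version=1, num_tokens) followed
+    # by the raw uint16 tokens -- which is why the reader seeks to 256 * 4 and
+    # expects 2 * num_tokens bytes. tokens_u16 / shard_path are hypothetical:
+    #   header = np.zeros(256, dtype=np.int32)
+    #   header[0], header[1], header[2] = 20240520, 1, len(tokens_u16)
+    #   with open(shard_path, "wb") as out:
+    #       out.write(header.tobytes())
+    #       out.write(np.asarray(tokens_u16, dtype=np.uint16).tobytes())
+    with file.open("rb",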
buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
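+                         "9: SGD+Momentum on ALL parameters (lr from --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QKV Attn); "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QKV Attn, W_1 MLP); "
+                         "14: Muon(O Attn)/Adam(QKV Attn, MLP); "
+                         "15: Muon(V Attn)/Adam(QK Attn, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."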
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
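+
+# Worked example (illustrative) for generate_powerlaw_selection_counts defined
+# below: m = 3 returns selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+# with class_groups = [0, 1, 2, 2, 3, 3, 3, 3]. Group g >= 1 holds 2**(g-1)
+# classes of 2**(m-g) samples each (a constant 2**(m-1) samples per group),
+# while the single class of group 0 holds 2**m samples on its own.
+# ...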
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
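Evaluation loop (worked padding example first).
+    # Illustrative arithmetic for the loop below (not executed): a 200-token
+    # sample is padded up to the next multiple of BLOCK_SIZE=128, i.e. 256,
+    # and capped at max_eval_len=4096. Targets are the inputs shifted by one
+    # position; padding slots carry -100 so F.cross_entropy skips them via
+    # ignore_index=-100. First-token accuracy (FTA) then reads the logits at
+    # index len(prompt_tokens) - 1 and compares the argmax with the first
+    # token of " " + answer.
+    # 3.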
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
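Aggregate results (both total-accuracy variants illustrated below).
+    # Illustrative contrast between the two totals computed here: with two
+    # groups, A = 90/100 correct and B = 1/10 correct,
+    #   total_acc_weighted   = (90 + 1) / (100 + 10) ~= 0.827 (sample-weighted)
+    #   total_acc_unweighted = (0.90 + 0.10) / 2      = 0.500 (mean over groups)
+    # The returned 'total_acc' is the unweighted mean, so rare groups count
+    # as much as frequent ones.
+    # 4.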
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
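(Final fixed version: NameError resolved)
+    """
+    # Illustrative sampling arithmetic for the num_samples path below (not
+    # executed here): with num_samples=5000 and, say, 100000 QA lines,
+    # sample_ratio = 0.05 and each class keeps max(1, int(0.05 * len(items)))
+    # of its items, so small classes are never dropped entirely and every
+    # group stays represented in the per-class averages.
+    """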
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
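Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        # Every mode below follows the same pattern (illustrative summary):
+        # a chosen subset of the 2-D hidden matrices goes to Muon, and the
+        # rest -- plus embeddings, lm_head and scalar params -- stays with Adam.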
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # read the CLI value directly; this branch defines no local muon_lr
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
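model_compiled.parameters():
+    # Worked example for the get_lr schedule defined above (illustrative):
+    # with num_iterations=10000 and cooldown_frac=0.8 the multiplier stays at
+    # 1.0 for the first 2000 steps (x < 0.2), then decays linearly to 0.1:
+    #   get_lr(2000) = 1.0, get_lr(6000) = 0.55, get_lr(10000) = 0.1.
+    # The training loop later applies group["initial_lr"] * get_lr(step) to
+    # every optimizer group.
+    for param in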
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
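Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+        # Sizing note (illustrative): with m_val=15 there are 16 groups, so
+        # PER_GROUP_K=100 caps the fixed eval set at 1600 samples; any group
+        # with fewer than 100 parseable QA lines contributes all of them.
+        # Persisting the sampled line indices keeps the eval set identical
+        # across steps, removing sampling jitter from the reported curves.
+        print0(f"PRINT: Built fixed eval set.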
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
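val_loss is NaN.", console=True)
+            # Divisibility, worked out (illustrative): val_batch_size =
+            # world_size * val_seq_len = world_size * 16384 and val_tokens =
+            # 491520 = 30 * 16384, so val_num_steps = 30 // world_size and the
+            # divisibility warning earlier in this block fires exactly when
+            # world_size does not divide 30.
+            print0(f"PRINT: Warning: No validation steps were completed.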
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
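console=True)
+                # Restoring such a checkpoint later (illustrative; not done by
+                # this script): the saved dict holds step, source code, model
+                # and optimizer states, so a resume could do
+                #   ckpt = torch.load(str(checkpoint_path))
+                #   model_compiled.load_state_dict(ckpt["model"])
+                #   for opt, s in zip(optimizers, ckpt["optimizers"]):
+                #       opt.load_state_dict(s)
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}",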
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-06 01:50:28] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed (keyed by adam_lr for this lr search;
+    # the sgd_lr/muon_lr variants are kept below for other sweeps)
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the logfile once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
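+# Descriptive note on print0 (added comment): any message that starts with "PRINT:"
+# is echoed to stdout with its first 6 characters stripped (s[6:]); other messages
+# reach stdout only when console=True. Every message is also appended to the logfile
+# as "[YYYY-mm-dd HH:MM:SS] [Rank 0] <message>", so the console stays terse while the
+# logfile keeps the full, timestamped record.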
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
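+
+# Worked example of the power-law schedule above (added comment; values follow
+# directly from the loop). For m = 3, the (group -> classes x samples/class) layout is:
+#   group 0: 1 class   x 8 samples
+#   group 1: 1 class   x 4 samples
+#   group 2: 2 classes x 2 samples
+#   group 3: 4 classes x 1 sample
+# i.e. selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1} and
+# class_groups = [0, 1, 2, 2, 3, 3, 3, 3]. Every group g >= 1 contributes the same
+# 2**(m-1) total samples, which is what makes the class sizes power-law distributed.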
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
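+
+    # Descriptive note (added comment): FTA here is first-token accuracy -- the
+    # argmax prediction at the last prompt position must match the first token of
+    # " <answer>" (the leading space matters for the GPT-2 BPE tokenizer), while
+    # the loss is cross-entropy over the whole padded sequence with padding
+    # positions masked out via ignore_index=-100.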
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+
+    # Two methods for calculating total accuracy
+    total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples
+    total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy
+        'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups
+        'total_acc': total_acc_unweighted # Primarily use simple average method
+    }
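+
+# Illustrative sketch (added; not called anywhere in this script): shows how the
+# two totals above can diverge when group sizes are imbalanced. When every group
+# contributes the same number of samples the two coincide; with raw power-law
+# counts the weighted total is dominated by the head groups.
+def _toy_total_acc_example():
+    group_correct = {0: 100, 1: 0}   # hypothetical correct counts per group
+    group_total   = {0: 100, 1: 10}  # hypothetical sample counts per group
+    per_group = {g: group_correct[g] / group_total[g] for g in group_total}
+    weighted = sum(group_correct.values()) / sum(group_total.values())  # 100/110 ~ 0.909
+    unweighted = sum(per_group.values()) / len(per_group)               # (1.0 + 0.0)/2 = 0.5
+    return weighted, unweighted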
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
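+    # Grouping convention used by the modes above (descriptive comment): QK = q_w/k_w,
+    # VO = v_w plus the attention output projection W_O (attn.c_proj); W_1 = mlp.c_fc
+    # and W_2 = mlp.c_proj, so modes 10/13/14/15/16 carve the same matrices into finer
+    # Muon-vs-Adam splits. Embeddings, the lm_head, and scalar parameters always stay
+    # with Adam regardless of mode.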
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
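+    # Same grouping convention as the "qkvo" branch above (descriptive comment), with
+    # one difference for the gated MLP: W_1 covers both mlp.c_fc and mlp.c_up, while
+    # W_2 is still mlp.c_proj, so modes 6/8/13 send the gate and up projections to
+    # Adam and only the down projection to Muon.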
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        muon_lr = exp_args.muon_lr # Muon LR (the gated branch does not define this earlier, unlike the qkvo branch)
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / step if step > 0 else 0
+
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            # Copy current weights into the uncompiled reference model kept for inference.
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # track the unweighted (simple-average) FTA
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
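+        # Shape of the history structure the plot_curves calls above consume
+        # (a sketch; the nested-dict layout follows from the assignments above):
+        #   history['per_class_loss'][group_id][step_str] -> float
+        #   history['total_loss'][step_str]               -> float
+        if False:
+            demo_history = {"0": {"500": 3.92, "1000": 3.53}}  # hypothetical group-0 losses
+            plot_curves(demo_history, run_dir_path / "demo_curve.png", "Demo", "Loss")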
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        # Reset the running train-loss accumulators for the next validation window.
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Gradient clipping for the pure-SGD mode (mode 9) to prevent gradient explosion.
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Momentum warmup for the second optimizer: 0.85 -> 0.95 over the first 300 steps.
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-06 01:50:28] [Rank 0] PRINT: Constructing model...
+[2025-09-06 01:50:28] [Rank 0] PRINT: Constructing model...
+[2025-09-06 01:50:29] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-06 01:50:29] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-06 01:50:29] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-06 01:50:29] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-06 01:50:29] [Rank 0] PRINT: Testing model forward function: +[2025-09-06 01:50:29] [Rank 0] PRINT: Testing model forward function: +[2025-09-06 01:50:34] [Rank 0] PRINT: Model test - Result type: +[2025-09-06 01:50:34] [Rank 0] PRINT: Model test - Result type: +[2025-09-06 01:50:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-06 01:50:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-06 01:50:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-06 01:50:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-06 01:50:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-06 01:50:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-06 01:50:34] [Rank 0] PRINT: Model returns: +[2025-09-06 01:50:34] [Rank 0] PRINT: Model returns: +[2025-09-06 01:50:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-06 01:50:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-06 01:50:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9 +[2025-09-06 01:50:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9 +[2025-09-06 01:50:34] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.08). +[2025-09-06 01:50:34] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.08). +[2025-09-06 01:50:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-09-06 01:50:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-09-06 01:50:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-06 01:50:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-06 01:50:39] [Rank 0] PRINT: Model compilation complete. +[2025-09-06 01:50:39] [Rank 0] PRINT: Model compilation complete. +[2025-09-06 01:50:39] [Rank 0] PRINT: Starting warmup... +[2025-09-06 01:50:39] [Rank 0] PRINT: Starting warmup... +[2025-09-06 01:51:16] [Rank 0] PRINT: Warmup complete. +[2025-09-06 01:51:16] [Rank 0] PRINT: Warmup complete. +[2025-09-06 01:51:16] [Rank 0] PRINT: Starting training... +[2025-09-06 01:51:16] [Rank 0] PRINT: Starting training... +[2025-09-06 01:51:22] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/fixed_eval_indices.json +[2025-09-06 01:51:22] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/fixed_eval_indices.json +[2025-09-06 01:51:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:51:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-06 01:51:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-06 01:51:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-06 01:51:57] [Rank 0] step:21/10000 train_time:31768ms step_avg:1512.77ms +[2025-09-06 01:51:57] [Rank 0] step:21/10000 train_time:31768ms step_avg:1512.77ms +[2025-09-06 01:51:58] [Rank 0] step:41/10000 train_time:32497ms step_avg:792.60ms +[2025-09-06 01:51:58] [Rank 0] step:41/10000 train_time:32497ms step_avg:792.60ms +[2025-09-06 01:51:59] [Rank 0] step:61/10000 train_time:33224ms step_avg:544.65ms +[2025-09-06 01:51:59] [Rank 0] step:61/10000 train_time:33224ms step_avg:544.65ms +[2025-09-06 01:52:00] [Rank 0] step:81/10000 train_time:33951ms step_avg:419.14ms +[2025-09-06 01:52:00] [Rank 0] step:81/10000 train_time:33951ms step_avg:419.14ms +[2025-09-06 01:52:00] [Rank 0] step:101/10000 train_time:34679ms step_avg:343.35ms +[2025-09-06 01:52:00] [Rank 0] step:101/10000 train_time:34679ms step_avg:343.35ms +[2025-09-06 01:52:01] [Rank 0] step:121/10000 train_time:35406ms step_avg:292.61ms +[2025-09-06 01:52:01] [Rank 0] step:121/10000 train_time:35406ms step_avg:292.61ms +[2025-09-06 01:52:02] [Rank 0] step:141/10000 train_time:36135ms step_avg:256.27ms +[2025-09-06 01:52:02] [Rank 0] step:141/10000 train_time:36135ms step_avg:256.27ms +[2025-09-06 01:52:03] [Rank 0] step:161/10000 train_time:36863ms step_avg:228.96ms +[2025-09-06 01:52:03] [Rank 0] step:161/10000 train_time:36863ms step_avg:228.96ms +[2025-09-06 01:52:03] [Rank 0] step:181/10000 train_time:37590ms step_avg:207.68ms +[2025-09-06 01:52:03] [Rank 0] step:181/10000 train_time:37590ms step_avg:207.68ms +[2025-09-06 01:52:04] [Rank 0] step:201/10000 train_time:38318ms step_avg:190.63ms +[2025-09-06 01:52:04] [Rank 0] step:201/10000 train_time:38318ms step_avg:190.63ms +[2025-09-06 01:52:05] [Rank 0] step:221/10000 train_time:39045ms step_avg:176.67ms +[2025-09-06 01:52:05] [Rank 0] step:221/10000 train_time:39045ms step_avg:176.67ms +[2025-09-06 01:52:06] [Rank 0] step:241/10000 train_time:39773ms step_avg:165.03ms +[2025-09-06 01:52:06] [Rank 0] step:241/10000 train_time:39773ms step_avg:165.03ms +[2025-09-06 01:52:06] [Rank 0] step:261/10000 train_time:40501ms step_avg:155.17ms +[2025-09-06 01:52:06] [Rank 0] step:261/10000 train_time:40501ms step_avg:155.17ms +[2025-09-06 01:52:07] [Rank 0] step:281/10000 train_time:41227ms step_avg:146.72ms +[2025-09-06 01:52:07] [Rank 0] step:281/10000 train_time:41227ms step_avg:146.72ms +[2025-09-06 01:52:08] [Rank 0] step:301/10000 train_time:41955ms step_avg:139.38ms +[2025-09-06 01:52:08] [Rank 0] step:301/10000 train_time:41955ms step_avg:139.38ms +[2025-09-06 01:52:08] [Rank 0] step:321/10000 train_time:42682ms step_avg:132.97ms +[2025-09-06 01:52:08] [Rank 0] step:321/10000 train_time:42682ms step_avg:132.97ms +[2025-09-06 01:52:09] [Rank 0] step:341/10000 train_time:43410ms step_avg:127.30ms +[2025-09-06 01:52:09] [Rank 0] step:341/10000 train_time:43410ms step_avg:127.30ms +[2025-09-06 01:52:10] [Rank 0] step:361/10000 train_time:44138ms step_avg:122.26ms +[2025-09-06 01:52:10] [Rank 0] step:361/10000 train_time:44138ms step_avg:122.26ms +[2025-09-06 01:52:11] [Rank 0] step:381/10000 train_time:44865ms step_avg:117.76ms +[2025-09-06 01:52:11] [Rank 0] step:381/10000 train_time:44865ms step_avg:117.76ms +[2025-09-06 01:52:11] [Rank 0] step:401/10000 train_time:45592ms step_avg:113.70ms +[2025-09-06 01:52:11] [Rank 0] step:401/10000 train_time:45592ms step_avg:113.70ms +[2025-09-06 01:52:12] [Rank 0] 
step:421/10000 train_time:46321ms step_avg:110.03ms +[2025-09-06 01:52:12] [Rank 0] step:421/10000 train_time:46321ms step_avg:110.03ms +[2025-09-06 01:52:13] [Rank 0] step:441/10000 train_time:47049ms step_avg:106.69ms +[2025-09-06 01:52:13] [Rank 0] step:441/10000 train_time:47049ms step_avg:106.69ms +[2025-09-06 01:52:14] [Rank 0] step:461/10000 train_time:47777ms step_avg:103.64ms +[2025-09-06 01:52:14] [Rank 0] step:461/10000 train_time:47777ms step_avg:103.64ms +[2025-09-06 01:52:14] [Rank 0] step:481/10000 train_time:48505ms step_avg:100.84ms +[2025-09-06 01:52:14] [Rank 0] step:481/10000 train_time:48505ms step_avg:100.84ms +[2025-09-06 01:52:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:52:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:52:15] [Rank 0] PRINT: step:500/10000 train_loss:5.9707 val_loss:4.3328 train_time:49313ms step_avg:98.63ms +[2025-09-06 01:52:15] [Rank 0] PRINT: step:500/10000 train_loss:5.9707 val_loss:4.3328 train_time:49313ms step_avg:98.63ms +[2025-09-06 01:52:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:52:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:52:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:52:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:53:36] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:53:36] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:53:36] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:53:36] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:53:36] [Rank 0] Total Loss: 6.1715 +[2025-09-06 01:53:36] [Rank 0] Total Loss: 6.1715 +[2025-09-06 01:53:36] [Rank 0] Total FTA (Unweighted): 0.0806 +[2025-09-06 01:53:36] [Rank 0] Total FTA (Unweighted): 0.0806 +[2025-09-06 01:53:36] [Rank 0] Total FTA (Weighted): 0.0806 +[2025-09-06 01:53:36] [Rank 0] Total FTA (Weighted): 0.0806 +[2025-09-06 01:53:36] [Rank 0] Group 0 Loss: 3.9201 +[2025-09-06 01:53:36] [Rank 0] Group 0 Loss: 3.9201 +[2025-09-06 01:53:37] [Rank 0] Group 1 Loss: 4.0754 +[2025-09-06 01:53:37] [Rank 0] Group 1 Loss: 4.0754 +[2025-09-06 01:53:37] [Rank 0] Group 2 Loss: 4.9501 +[2025-09-06 01:53:37] [Rank 0] Group 2 Loss: 4.9501 +[2025-09-06 01:53:37] [Rank 0] Group 3 Loss: 5.7207 +[2025-09-06 01:53:37] [Rank 0] Group 3 Loss: 5.7207 +[2025-09-06 01:53:37] [Rank 0] Group 4 Loss: 6.4105 +[2025-09-06 01:53:37] [Rank 0] Group 4 Loss: 6.4105 +[2025-09-06 01:53:37] [Rank 0] Group 5 Loss: 6.5196 +[2025-09-06 01:53:37] [Rank 0] Group 5 Loss: 6.5196 +[2025-09-06 01:53:37] [Rank 0] Group 6 Loss: 6.5867 +[2025-09-06 01:53:37] [Rank 0] Group 6 Loss: 6.5867 +[2025-09-06 01:53:37] [Rank 0] Group 7 Loss: 6.5660 +[2025-09-06 01:53:37] [Rank 0] Group 7 Loss: 6.5660 +[2025-09-06 01:53:37] [Rank 0] Group 8 Loss: 6.6965 +[2025-09-06 01:53:37] [Rank 0] Group 8 Loss: 6.6965 +[2025-09-06 01:53:37] [Rank 0] Group 9 Loss: 6.8129 +[2025-09-06 01:53:37] [Rank 0] Group 9 Loss: 6.8129 +[2025-09-06 01:53:37] [Rank 0] Group 10 Loss: 6.7941 +[2025-09-06 01:53:37] [Rank 0] Group 10 Loss: 6.7941 +[2025-09-06 01:53:37] [Rank 0] Group 11 Loss: 6.8600 +[2025-09-06 01:53:37] [Rank 0] Group 11 Loss: 6.8600 +[2025-09-06 01:53:37] [Rank 0] Group 12 Loss: 6.6714 +[2025-09-06 01:53:37] [Rank 0] Group 12 Loss: 6.6714 +[2025-09-06 
01:53:37] [Rank 0] Group 13 Loss: 6.6800 +[2025-09-06 01:53:37] [Rank 0] Group 13 Loss: 6.6800 +[2025-09-06 01:53:37] [Rank 0] Group 14 Loss: 6.7891 +[2025-09-06 01:53:37] [Rank 0] Group 14 Loss: 6.7891 +[2025-09-06 01:53:37] [Rank 0] Group 15 Loss: 6.6912 +[2025-09-06 01:53:37] [Rank 0] Group 15 Loss: 6.6912 +[2025-09-06 01:53:37] [Rank 0] Group 0 FTA: 0.0000 +[2025-09-06 01:53:37] [Rank 0] Group 0 FTA: 0.0000 +[2025-09-06 01:53:37] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 01:53:37] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 01:53:37] [Rank 0] Group 2 FTA: 0.0700 +[2025-09-06 01:53:37] [Rank 0] Group 2 FTA: 0.0700 +[2025-09-06 01:53:37] [Rank 0] Group 3 FTA: 0.0800 +[2025-09-06 01:53:37] [Rank 0] Group 3 FTA: 0.0800 +[2025-09-06 01:53:37] [Rank 0] Group 4 FTA: 0.0300 +[2025-09-06 01:53:37] [Rank 0] Group 4 FTA: 0.0300 +[2025-09-06 01:53:37] [Rank 0] Group 5 FTA: 0.0600 +[2025-09-06 01:53:37] [Rank 0] Group 5 FTA: 0.0600 +[2025-09-06 01:53:37] [Rank 0] Group 6 FTA: 0.0600 +[2025-09-06 01:53:37] [Rank 0] Group 6 FTA: 0.0600 +[2025-09-06 01:53:37] [Rank 0] Group 7 FTA: 0.0700 +[2025-09-06 01:53:37] [Rank 0] Group 7 FTA: 0.0700 +[2025-09-06 01:53:37] [Rank 0] Group 8 FTA: 0.1100 +[2025-09-06 01:53:37] [Rank 0] Group 8 FTA: 0.1100 +[2025-09-06 01:53:37] [Rank 0] Group 9 FTA: 0.0600 +[2025-09-06 01:53:37] [Rank 0] Group 9 FTA: 0.0600 +[2025-09-06 01:53:37] [Rank 0] Group 10 FTA: 0.0600 +[2025-09-06 01:53:37] [Rank 0] Group 10 FTA: 0.0600 +[2025-09-06 01:53:37] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-06 01:53:37] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-06 01:53:37] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:53:37] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:53:37] [Rank 0] Group 13 FTA: 0.1000 +[2025-09-06 01:53:37] [Rank 0] Group 13 FTA: 0.1000 +[2025-09-06 01:53:37] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 01:53:37] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 01:53:37] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 01:53:37] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-06 01:53:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png +[2025-09-06 01:53:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png +[2025-09-06 01:53:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png +[2025-09-06 01:53:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png +[2025-09-06 01:53:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png +[2025-09-06 01:53:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png +[2025-09-06 01:53:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png +[2025-09-06 01:53:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png +[2025-09-06 01:53:38] [Rank 0] step:501/10000 train_time:49322ms step_avg:98.45ms +[2025-09-06 01:53:38] [Rank 0] step:501/10000 train_time:49322ms step_avg:98.45ms +[2025-09-06 01:53:39] 
[Rank 0] step:521/10000 train_time:49984ms step_avg:95.94ms +[2025-09-06 01:53:39] [Rank 0] step:521/10000 train_time:49984ms step_avg:95.94ms +[2025-09-06 01:53:40] [Rank 0] step:541/10000 train_time:50711ms step_avg:93.74ms +[2025-09-06 01:53:40] [Rank 0] step:541/10000 train_time:50711ms step_avg:93.74ms +[2025-09-06 01:53:41] [Rank 0] step:561/10000 train_time:51438ms step_avg:91.69ms +[2025-09-06 01:53:41] [Rank 0] step:561/10000 train_time:51438ms step_avg:91.69ms +[2025-09-06 01:53:41] [Rank 0] step:581/10000 train_time:52166ms step_avg:89.79ms +[2025-09-06 01:53:41] [Rank 0] step:581/10000 train_time:52166ms step_avg:89.79ms +[2025-09-06 01:53:42] [Rank 0] step:601/10000 train_time:52892ms step_avg:88.01ms +[2025-09-06 01:53:42] [Rank 0] step:601/10000 train_time:52892ms step_avg:88.01ms +[2025-09-06 01:53:43] [Rank 0] step:621/10000 train_time:53620ms step_avg:86.34ms +[2025-09-06 01:53:43] [Rank 0] step:621/10000 train_time:53620ms step_avg:86.34ms +[2025-09-06 01:53:43] [Rank 0] step:641/10000 train_time:54346ms step_avg:84.78ms +[2025-09-06 01:53:43] [Rank 0] step:641/10000 train_time:54346ms step_avg:84.78ms +[2025-09-06 01:53:44] [Rank 0] step:661/10000 train_time:55074ms step_avg:83.32ms +[2025-09-06 01:53:44] [Rank 0] step:661/10000 train_time:55074ms step_avg:83.32ms +[2025-09-06 01:53:45] [Rank 0] step:681/10000 train_time:55920ms step_avg:82.11ms +[2025-09-06 01:53:45] [Rank 0] step:681/10000 train_time:55920ms step_avg:82.11ms +[2025-09-06 01:53:46] [Rank 0] step:701/10000 train_time:56647ms step_avg:80.81ms +[2025-09-06 01:53:46] [Rank 0] step:701/10000 train_time:56647ms step_avg:80.81ms +[2025-09-06 01:53:47] [Rank 0] step:721/10000 train_time:57375ms step_avg:79.58ms +[2025-09-06 01:53:47] [Rank 0] step:721/10000 train_time:57375ms step_avg:79.58ms +[2025-09-06 01:53:47] [Rank 0] step:741/10000 train_time:58103ms step_avg:78.41ms +[2025-09-06 01:53:47] [Rank 0] step:741/10000 train_time:58103ms step_avg:78.41ms +[2025-09-06 01:53:48] [Rank 0] step:761/10000 train_time:58973ms step_avg:77.49ms +[2025-09-06 01:53:48] [Rank 0] step:761/10000 train_time:58973ms step_avg:77.49ms +[2025-09-06 01:53:49] [Rank 0] step:781/10000 train_time:59706ms step_avg:76.45ms +[2025-09-06 01:53:49] [Rank 0] step:781/10000 train_time:59706ms step_avg:76.45ms +[2025-09-06 01:53:50] [Rank 0] step:801/10000 train_time:60438ms step_avg:75.45ms +[2025-09-06 01:53:50] [Rank 0] step:801/10000 train_time:60438ms step_avg:75.45ms +[2025-09-06 01:53:50] [Rank 0] step:821/10000 train_time:61365ms step_avg:74.74ms +[2025-09-06 01:53:50] [Rank 0] step:821/10000 train_time:61365ms step_avg:74.74ms +[2025-09-06 01:53:51] [Rank 0] step:841/10000 train_time:62097ms step_avg:73.84ms +[2025-09-06 01:53:51] [Rank 0] step:841/10000 train_time:62097ms step_avg:73.84ms +[2025-09-06 01:53:52] [Rank 0] step:861/10000 train_time:62830ms step_avg:72.97ms +[2025-09-06 01:53:52] [Rank 0] step:861/10000 train_time:62830ms step_avg:72.97ms +[2025-09-06 01:53:53] [Rank 0] step:881/10000 train_time:63562ms step_avg:72.15ms +[2025-09-06 01:53:53] [Rank 0] step:881/10000 train_time:63562ms step_avg:72.15ms +[2025-09-06 01:53:53] [Rank 0] step:901/10000 train_time:64294ms step_avg:71.36ms +[2025-09-06 01:53:53] [Rank 0] step:901/10000 train_time:64294ms step_avg:71.36ms +[2025-09-06 01:53:54] [Rank 0] step:921/10000 train_time:65027ms step_avg:70.60ms +[2025-09-06 01:53:54] [Rank 0] step:921/10000 train_time:65027ms step_avg:70.60ms +[2025-09-06 01:53:55] [Rank 0] step:941/10000 train_time:65760ms step_avg:69.88ms 
+[2025-09-06 01:53:55] [Rank 0] step:941/10000 train_time:65760ms step_avg:69.88ms +[2025-09-06 01:53:56] [Rank 0] step:961/10000 train_time:66493ms step_avg:69.19ms +[2025-09-06 01:53:56] [Rank 0] step:961/10000 train_time:66493ms step_avg:69.19ms +[2025-09-06 01:53:56] [Rank 0] step:981/10000 train_time:67225ms step_avg:68.53ms +[2025-09-06 01:53:56] [Rank 0] step:981/10000 train_time:67225ms step_avg:68.53ms +[2025-09-06 01:53:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:53:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:53:58] [Rank 0] PRINT: step:1000/10000 train_loss:3.9008 val_loss:3.5536 train_time:68044ms step_avg:68.04ms +[2025-09-06 01:53:58] [Rank 0] PRINT: step:1000/10000 train_loss:3.9008 val_loss:3.5536 train_time:68044ms step_avg:68.04ms +[2025-09-06 01:53:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:53:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:53:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:53:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:55:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:55:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:55:18] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:55:18] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:55:18] [Rank 0] Total Loss: 5.6642 +[2025-09-06 01:55:18] [Rank 0] Total Loss: 5.6642 +[2025-09-06 01:55:18] [Rank 0] Total FTA (Unweighted): 0.1238 +[2025-09-06 01:55:18] [Rank 0] Total FTA (Unweighted): 0.1238 +[2025-09-06 01:55:18] [Rank 0] Total FTA (Weighted): 0.1237 +[2025-09-06 01:55:18] [Rank 0] Total FTA (Weighted): 0.1237 +[2025-09-06 01:55:18] [Rank 0] Group 0 Loss: 3.5341 +[2025-09-06 01:55:18] [Rank 0] Group 0 Loss: 3.5341 +[2025-09-06 01:55:18] [Rank 0] Group 1 Loss: 3.5181 +[2025-09-06 01:55:18] [Rank 0] Group 1 Loss: 3.5181 +[2025-09-06 01:55:18] [Rank 0] Group 2 Loss: 3.9289 +[2025-09-06 01:55:18] [Rank 0] Group 2 Loss: 3.9289 +[2025-09-06 01:55:18] [Rank 0] Group 3 Loss: 4.7505 +[2025-09-06 01:55:18] [Rank 0] Group 3 Loss: 4.7505 +[2025-09-06 01:55:18] [Rank 0] Group 4 Loss: 5.6506 +[2025-09-06 01:55:18] [Rank 0] Group 4 Loss: 5.6506 +[2025-09-06 01:55:18] [Rank 0] Group 5 Loss: 5.9419 +[2025-09-06 01:55:18] [Rank 0] Group 5 Loss: 5.9419 +[2025-09-06 01:55:18] [Rank 0] Group 6 Loss: 6.1152 +[2025-09-06 01:55:18] [Rank 0] Group 6 Loss: 6.1152 +[2025-09-06 01:55:18] [Rank 0] Group 7 Loss: 6.1314 +[2025-09-06 01:55:18] [Rank 0] Group 7 Loss: 6.1314 +[2025-09-06 01:55:18] [Rank 0] Group 8 Loss: 6.2982 +[2025-09-06 01:55:18] [Rank 0] Group 8 Loss: 6.2982 +[2025-09-06 01:55:18] [Rank 0] Group 9 Loss: 6.4551 +[2025-09-06 01:55:18] [Rank 0] Group 9 Loss: 6.4551 +[2025-09-06 01:55:18] [Rank 0] Group 10 Loss: 6.4223 +[2025-09-06 01:55:18] [Rank 0] Group 10 Loss: 6.4223 +[2025-09-06 01:55:18] [Rank 0] Group 11 Loss: 6.5027 +[2025-09-06 01:55:18] [Rank 0] Group 11 Loss: 6.5027 +[2025-09-06 01:55:18] [Rank 0] Group 12 Loss: 6.3223 +[2025-09-06 01:55:18] [Rank 0] Group 12 Loss: 6.3223 +[2025-09-06 01:55:18] [Rank 0] Group 13 Loss: 6.3152 +[2025-09-06 01:55:18] [Rank 0] Group 13 Loss: 6.3152 +[2025-09-06 01:55:18] [Rank 0] Group 14 Loss: 6.4128 +[2025-09-06 01:55:18] [Rank 0] Group 14 Loss: 6.4128 +[2025-09-06 
01:55:18] [Rank 0] Group 15 Loss: 6.3288 +[2025-09-06 01:55:18] [Rank 0] Group 15 Loss: 6.3288 +[2025-09-06 01:55:18] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 01:55:18] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-06 01:55:18] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 01:55:18] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 01:55:18] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 01:55:18] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 01:55:18] [Rank 0] Group 3 FTA: 0.0800 +[2025-09-06 01:55:18] [Rank 0] Group 3 FTA: 0.0800 +[2025-09-06 01:55:18] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 01:55:18] [Rank 0] Group 4 FTA: 0.0900 +[2025-09-06 01:55:18] [Rank 0] Group 5 FTA: 0.1600 +[2025-09-06 01:55:18] [Rank 0] Group 5 FTA: 0.1600 +[2025-09-06 01:55:18] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 01:55:18] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 01:55:18] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 01:55:18] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 01:55:18] [Rank 0] Group 8 FTA: 0.1300 +[2025-09-06 01:55:18] [Rank 0] Group 8 FTA: 0.1300 +[2025-09-06 01:55:18] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-06 01:55:18] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-06 01:55:18] [Rank 0] Group 10 FTA: 0.0900 +[2025-09-06 01:55:18] [Rank 0] Group 10 FTA: 0.0900 +[2025-09-06 01:55:18] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 01:55:18] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 01:55:18] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:55:18] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:55:18] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 01:55:18] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 01:55:18] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 01:55:18] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 01:55:18] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-06 01:55:18] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-06 01:55:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png +[2025-09-06 01:55:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png +[2025-09-06 01:55:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png +[2025-09-06 01:55:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png +[2025-09-06 01:55:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png +[2025-09-06 01:55:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png +[2025-09-06 01:55:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png +[2025-09-06 01:55:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png +[2025-09-06 01:55:20] [Rank 0] step:1001/10000 train_time:68054ms step_avg:67.99ms +[2025-09-06 01:55:20] [Rank 0] step:1001/10000 train_time:68054ms step_avg:67.99ms +[2025-09-06 01:55:21] [Rank 0] step:1021/10000 train_time:68715ms step_avg:67.30ms +[2025-09-06 01:55:21] [Rank 0] step:1021/10000 train_time:68715ms step_avg:67.30ms +[2025-09-06 01:55:21] [Rank 0] step:1041/10000 train_time:69447ms 
step_avg:66.71ms +[2025-09-06 01:55:21] [Rank 0] step:1041/10000 train_time:69447ms step_avg:66.71ms +[2025-09-06 01:55:22] [Rank 0] step:1061/10000 train_time:70179ms step_avg:66.14ms +[2025-09-06 01:55:22] [Rank 0] step:1061/10000 train_time:70179ms step_avg:66.14ms +[2025-09-06 01:55:23] [Rank 0] step:1081/10000 train_time:70912ms step_avg:65.60ms +[2025-09-06 01:55:23] [Rank 0] step:1081/10000 train_time:70912ms step_avg:65.60ms +[2025-09-06 01:55:24] [Rank 0] step:1101/10000 train_time:71644ms step_avg:65.07ms +[2025-09-06 01:55:24] [Rank 0] step:1101/10000 train_time:71644ms step_avg:65.07ms +[2025-09-06 01:55:24] [Rank 0] step:1121/10000 train_time:72377ms step_avg:64.56ms +[2025-09-06 01:55:24] [Rank 0] step:1121/10000 train_time:72377ms step_avg:64.56ms +[2025-09-06 01:55:25] [Rank 0] step:1141/10000 train_time:73109ms step_avg:64.07ms +[2025-09-06 01:55:25] [Rank 0] step:1141/10000 train_time:73109ms step_avg:64.07ms +[2025-09-06 01:55:26] [Rank 0] step:1161/10000 train_time:73842ms step_avg:63.60ms +[2025-09-06 01:55:26] [Rank 0] step:1161/10000 train_time:73842ms step_avg:63.60ms +[2025-09-06 01:55:27] [Rank 0] step:1181/10000 train_time:74575ms step_avg:63.15ms +[2025-09-06 01:55:27] [Rank 0] step:1181/10000 train_time:74575ms step_avg:63.15ms +[2025-09-06 01:55:27] [Rank 0] step:1201/10000 train_time:75308ms step_avg:62.70ms +[2025-09-06 01:55:27] [Rank 0] step:1201/10000 train_time:75308ms step_avg:62.70ms +[2025-09-06 01:55:28] [Rank 0] step:1221/10000 train_time:76039ms step_avg:62.28ms +[2025-09-06 01:55:28] [Rank 0] step:1221/10000 train_time:76039ms step_avg:62.28ms +[2025-09-06 01:55:29] [Rank 0] step:1241/10000 train_time:76771ms step_avg:61.86ms +[2025-09-06 01:55:29] [Rank 0] step:1241/10000 train_time:76771ms step_avg:61.86ms +[2025-09-06 01:55:29] [Rank 0] step:1261/10000 train_time:77502ms step_avg:61.46ms +[2025-09-06 01:55:29] [Rank 0] step:1261/10000 train_time:77502ms step_avg:61.46ms +[2025-09-06 01:55:30] [Rank 0] step:1281/10000 train_time:78234ms step_avg:61.07ms +[2025-09-06 01:55:30] [Rank 0] step:1281/10000 train_time:78234ms step_avg:61.07ms +[2025-09-06 01:55:31] [Rank 0] step:1301/10000 train_time:78966ms step_avg:60.70ms +[2025-09-06 01:55:31] [Rank 0] step:1301/10000 train_time:78966ms step_avg:60.70ms +[2025-09-06 01:55:32] [Rank 0] step:1321/10000 train_time:79698ms step_avg:60.33ms +[2025-09-06 01:55:32] [Rank 0] step:1321/10000 train_time:79698ms step_avg:60.33ms +[2025-09-06 01:55:32] [Rank 0] step:1341/10000 train_time:80430ms step_avg:59.98ms +[2025-09-06 01:55:32] [Rank 0] step:1341/10000 train_time:80430ms step_avg:59.98ms +[2025-09-06 01:55:33] [Rank 0] step:1361/10000 train_time:81162ms step_avg:59.63ms +[2025-09-06 01:55:33] [Rank 0] step:1361/10000 train_time:81162ms step_avg:59.63ms +[2025-09-06 01:55:34] [Rank 0] step:1381/10000 train_time:81894ms step_avg:59.30ms +[2025-09-06 01:55:34] [Rank 0] step:1381/10000 train_time:81894ms step_avg:59.30ms +[2025-09-06 01:55:35] [Rank 0] step:1401/10000 train_time:82627ms step_avg:58.98ms +[2025-09-06 01:55:35] [Rank 0] step:1401/10000 train_time:82627ms step_avg:58.98ms +[2025-09-06 01:55:35] [Rank 0] step:1421/10000 train_time:83360ms step_avg:58.66ms +[2025-09-06 01:55:35] [Rank 0] step:1421/10000 train_time:83360ms step_avg:58.66ms +[2025-09-06 01:55:36] [Rank 0] step:1441/10000 train_time:84092ms step_avg:58.36ms +[2025-09-06 01:55:36] [Rank 0] step:1441/10000 train_time:84092ms step_avg:58.36ms +[2025-09-06 01:55:37] [Rank 0] step:1461/10000 train_time:84825ms step_avg:58.06ms 
+[2025-09-06 01:55:37] [Rank 0] step:1461/10000 train_time:84825ms step_avg:58.06ms +[2025-09-06 01:55:38] [Rank 0] step:1481/10000 train_time:85557ms step_avg:57.77ms +[2025-09-06 01:55:38] [Rank 0] step:1481/10000 train_time:85557ms step_avg:57.77ms +[2025-09-06 01:55:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:55:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:55:39] [Rank 0] PRINT: step:1500/10000 train_loss:3.3495 val_loss:3.1720 train_time:86370ms step_avg:57.58ms +[2025-09-06 01:55:39] [Rank 0] PRINT: step:1500/10000 train_loss:3.3495 val_loss:3.1720 train_time:86370ms step_avg:57.58ms +[2025-09-06 01:55:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:55:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:55:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:55:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:56:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:56:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:56:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:56:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:56:59] [Rank 0] Total Loss: 5.4025 +[2025-09-06 01:56:59] [Rank 0] Total Loss: 5.4025 +[2025-09-06 01:56:59] [Rank 0] Total FTA (Unweighted): 0.1394 +[2025-09-06 01:56:59] [Rank 0] Total FTA (Unweighted): 0.1394 +[2025-09-06 01:56:59] [Rank 0] Total FTA (Weighted): 0.1394 +[2025-09-06 01:56:59] [Rank 0] Total FTA (Weighted): 0.1394 +[2025-09-06 01:56:59] [Rank 0] Group 0 Loss: 3.4156 +[2025-09-06 01:56:59] [Rank 0] Group 0 Loss: 3.4156 +[2025-09-06 01:56:59] [Rank 0] Group 1 Loss: 3.3714 +[2025-09-06 01:56:59] [Rank 0] Group 1 Loss: 3.3714 +[2025-09-06 01:56:59] [Rank 0] Group 2 Loss: 3.5163 +[2025-09-06 01:56:59] [Rank 0] Group 2 Loss: 3.5163 +[2025-09-06 01:56:59] [Rank 0] Group 3 Loss: 4.2872 +[2025-09-06 01:56:59] [Rank 0] Group 3 Loss: 4.2872 +[2025-09-06 01:56:59] [Rank 0] Group 4 Loss: 5.1476 +[2025-09-06 01:56:59] [Rank 0] Group 4 Loss: 5.1476 +[2025-09-06 01:56:59] [Rank 0] Group 5 Loss: 5.5836 +[2025-09-06 01:56:59] [Rank 0] Group 5 Loss: 5.5836 +[2025-09-06 01:56:59] [Rank 0] Group 6 Loss: 5.8513 +[2025-09-06 01:56:59] [Rank 0] Group 6 Loss: 5.8513 +[2025-09-06 01:56:59] [Rank 0] Group 7 Loss: 5.8751 +[2025-09-06 01:56:59] [Rank 0] Group 7 Loss: 5.8751 +[2025-09-06 01:56:59] [Rank 0] Group 8 Loss: 6.1044 +[2025-09-06 01:56:59] [Rank 0] Group 8 Loss: 6.1044 +[2025-09-06 01:56:59] [Rank 0] Group 9 Loss: 6.2182 +[2025-09-06 01:56:59] [Rank 0] Group 9 Loss: 6.2182 +[2025-09-06 01:56:59] [Rank 0] Group 10 Loss: 6.2361 +[2025-09-06 01:56:59] [Rank 0] Group 10 Loss: 6.2361 +[2025-09-06 01:56:59] [Rank 0] Group 11 Loss: 6.2851 +[2025-09-06 01:56:59] [Rank 0] Group 11 Loss: 6.2851 +[2025-09-06 01:56:59] [Rank 0] Group 12 Loss: 6.1017 +[2025-09-06 01:56:59] [Rank 0] Group 12 Loss: 6.1017 +[2025-09-06 01:56:59] [Rank 0] Group 13 Loss: 6.1178 +[2025-09-06 01:56:59] [Rank 0] Group 13 Loss: 6.1178 +[2025-09-06 01:56:59] [Rank 0] Group 14 Loss: 6.2025 +[2025-09-06 01:56:59] [Rank 0] Group 14 Loss: 6.2025 +[2025-09-06 01:56:59] [Rank 0] Group 15 Loss: 6.1255 +[2025-09-06 01:56:59] [Rank 0] Group 15 Loss: 6.1255 +[2025-09-06 01:56:59] [Rank 0] Group 0 FTA: 0.4000 +[2025-09-06 01:56:59] 
[Rank 0] Group 0 FTA: 0.4000 +[2025-09-06 01:56:59] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 01:56:59] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 01:56:59] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 01:56:59] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 01:56:59] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 01:56:59] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 01:56:59] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 01:56:59] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 01:56:59] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 01:56:59] [Rank 0] Group 6 FTA: 0.0700 +[2025-09-06 01:56:59] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 01:56:59] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 01:56:59] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 01:56:59] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 01:56:59] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 01:56:59] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-06 01:56:59] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:56:59] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:56:59] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 01:56:59] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 01:56:59] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-06 01:57:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png +[2025-09-06 01:57:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png +[2025-09-06 01:57:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png +[2025-09-06 01:57:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png +[2025-09-06 01:57:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png +[2025-09-06 01:57:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png +[2025-09-06 01:57:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png +[2025-09-06 01:57:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png +[2025-09-06 01:57:01] [Rank 0] step:1501/10000 train_time:86379ms step_avg:57.55ms +[2025-09-06 01:57:01] [Rank 0] step:1501/10000 train_time:86379ms step_avg:57.55ms +[2025-09-06 01:57:01] [Rank 0] step:1521/10000 train_time:87055ms step_avg:57.24ms +[2025-09-06 01:57:01] [Rank 0] step:1521/10000 train_time:87055ms step_avg:57.24ms +[2025-09-06 01:57:02] [Rank 0] step:1541/10000 train_time:87788ms step_avg:56.97ms +[2025-09-06 01:57:02] [Rank 0] step:1541/10000 train_time:87788ms step_avg:56.97ms +[2025-09-06 01:57:03] [Rank 0] step:1561/10000 train_time:88521ms 
step_avg:56.71ms +[2025-09-06 01:57:03] [Rank 0] step:1561/10000 train_time:88521ms step_avg:56.71ms +[2025-09-06 01:57:04] [Rank 0] step:1581/10000 train_time:89254ms step_avg:56.45ms +[2025-09-06 01:57:04] [Rank 0] step:1581/10000 train_time:89254ms step_avg:56.45ms +[2025-09-06 01:57:04] [Rank 0] step:1601/10000 train_time:89987ms step_avg:56.21ms +[2025-09-06 01:57:04] [Rank 0] step:1601/10000 train_time:89987ms step_avg:56.21ms +[2025-09-06 01:57:05] [Rank 0] step:1621/10000 train_time:90718ms step_avg:55.96ms +[2025-09-06 01:57:05] [Rank 0] step:1621/10000 train_time:90718ms step_avg:55.96ms +[2025-09-06 01:57:06] [Rank 0] step:1641/10000 train_time:92059ms step_avg:56.10ms +[2025-09-06 01:57:06] [Rank 0] step:1641/10000 train_time:92059ms step_avg:56.10ms +[2025-09-06 01:57:07] [Rank 0] step:1661/10000 train_time:92791ms step_avg:55.86ms +[2025-09-06 01:57:07] [Rank 0] step:1661/10000 train_time:92791ms step_avg:55.86ms +[2025-09-06 01:57:08] [Rank 0] step:1681/10000 train_time:93524ms step_avg:55.64ms +[2025-09-06 01:57:08] [Rank 0] step:1681/10000 train_time:93524ms step_avg:55.64ms +[2025-09-06 01:57:09] [Rank 0] step:1701/10000 train_time:94256ms step_avg:55.41ms +[2025-09-06 01:57:09] [Rank 0] step:1701/10000 train_time:94256ms step_avg:55.41ms +[2025-09-06 01:57:09] [Rank 0] step:1721/10000 train_time:94989ms step_avg:55.19ms +[2025-09-06 01:57:09] [Rank 0] step:1721/10000 train_time:94989ms step_avg:55.19ms +[2025-09-06 01:57:10] [Rank 0] step:1741/10000 train_time:95721ms step_avg:54.98ms +[2025-09-06 01:57:10] [Rank 0] step:1741/10000 train_time:95721ms step_avg:54.98ms +[2025-09-06 01:57:11] [Rank 0] step:1761/10000 train_time:96454ms step_avg:54.77ms +[2025-09-06 01:57:11] [Rank 0] step:1761/10000 train_time:96454ms step_avg:54.77ms +[2025-09-06 01:57:12] [Rank 0] step:1781/10000 train_time:97186ms step_avg:54.57ms +[2025-09-06 01:57:12] [Rank 0] step:1781/10000 train_time:97186ms step_avg:54.57ms +[2025-09-06 01:57:12] [Rank 0] step:1801/10000 train_time:97919ms step_avg:54.37ms +[2025-09-06 01:57:12] [Rank 0] step:1801/10000 train_time:97919ms step_avg:54.37ms +[2025-09-06 01:57:13] [Rank 0] step:1821/10000 train_time:98651ms step_avg:54.17ms +[2025-09-06 01:57:13] [Rank 0] step:1821/10000 train_time:98651ms step_avg:54.17ms +[2025-09-06 01:57:14] [Rank 0] step:1841/10000 train_time:99382ms step_avg:53.98ms +[2025-09-06 01:57:14] [Rank 0] step:1841/10000 train_time:99382ms step_avg:53.98ms +[2025-09-06 01:57:15] [Rank 0] step:1861/10000 train_time:100115ms step_avg:53.80ms +[2025-09-06 01:57:15] [Rank 0] step:1861/10000 train_time:100115ms step_avg:53.80ms +[2025-09-06 01:57:15] [Rank 0] step:1881/10000 train_time:100848ms step_avg:53.61ms +[2025-09-06 01:57:15] [Rank 0] step:1881/10000 train_time:100848ms step_avg:53.61ms +[2025-09-06 01:57:16] [Rank 0] step:1901/10000 train_time:101581ms step_avg:53.44ms +[2025-09-06 01:57:16] [Rank 0] step:1901/10000 train_time:101581ms step_avg:53.44ms +[2025-09-06 01:57:17] [Rank 0] step:1921/10000 train_time:102314ms step_avg:53.26ms +[2025-09-06 01:57:17] [Rank 0] step:1921/10000 train_time:102314ms step_avg:53.26ms +[2025-09-06 01:57:17] [Rank 0] step:1941/10000 train_time:103047ms step_avg:53.09ms +[2025-09-06 01:57:17] [Rank 0] step:1941/10000 train_time:103047ms step_avg:53.09ms +[2025-09-06 01:57:18] [Rank 0] step:1961/10000 train_time:103782ms step_avg:52.92ms +[2025-09-06 01:57:18] [Rank 0] step:1961/10000 train_time:103782ms step_avg:52.92ms +[2025-09-06 01:57:19] [Rank 0] step:1981/10000 train_time:104515ms 
step_avg:52.76ms +[2025-09-06 01:57:19] [Rank 0] step:1981/10000 train_time:104515ms step_avg:52.76ms +[2025-09-06 01:57:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:57:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:57:20] [Rank 0] PRINT: step:2000/10000 train_loss:3.0507 val_loss:2.9248 train_time:105328ms step_avg:52.66ms +[2025-09-06 01:57:20] [Rank 0] PRINT: step:2000/10000 train_loss:3.0507 val_loss:2.9248 train_time:105328ms step_avg:52.66ms +[2025-09-06 01:57:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:57:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:57:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:57:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:58:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:58:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 01:58:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:58:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 01:58:41] [Rank 0] Total Loss: 5.2516 +[2025-09-06 01:58:41] [Rank 0] Total Loss: 5.2516 +[2025-09-06 01:58:41] [Rank 0] Total FTA (Unweighted): 0.1656 +[2025-09-06 01:58:41] [Rank 0] Total FTA (Unweighted): 0.1656 +[2025-09-06 01:58:41] [Rank 0] Total FTA (Weighted): 0.1656 +[2025-09-06 01:58:41] [Rank 0] Total FTA (Weighted): 0.1656 +[2025-09-06 01:58:41] [Rank 0] Group 0 Loss: 3.3802 +[2025-09-06 01:58:41] [Rank 0] Group 0 Loss: 3.3802 +[2025-09-06 01:58:41] [Rank 0] Group 1 Loss: 3.4204 +[2025-09-06 01:58:41] [Rank 0] Group 1 Loss: 3.4204 +[2025-09-06 01:58:41] [Rank 0] Group 2 Loss: 3.4974 +[2025-09-06 01:58:41] [Rank 0] Group 2 Loss: 3.4974 +[2025-09-06 01:58:41] [Rank 0] Group 3 Loss: 4.1421 +[2025-09-06 01:58:41] [Rank 0] Group 3 Loss: 4.1421 +[2025-09-06 01:58:41] [Rank 0] Group 4 Loss: 4.8351 +[2025-09-06 01:58:41] [Rank 0] Group 4 Loss: 4.8351 +[2025-09-06 01:58:41] [Rank 0] Group 5 Loss: 5.3296 +[2025-09-06 01:58:41] [Rank 0] Group 5 Loss: 5.3296 +[2025-09-06 01:58:41] [Rank 0] Group 6 Loss: 5.5996 +[2025-09-06 01:58:41] [Rank 0] Group 6 Loss: 5.5996 +[2025-09-06 01:58:41] [Rank 0] Group 7 Loss: 5.6845 +[2025-09-06 01:58:41] [Rank 0] Group 7 Loss: 5.6845 +[2025-09-06 01:58:41] [Rank 0] Group 8 Loss: 5.9178 +[2025-09-06 01:58:41] [Rank 0] Group 8 Loss: 5.9178 +[2025-09-06 01:58:41] [Rank 0] Group 9 Loss: 6.0508 +[2025-09-06 01:58:41] [Rank 0] Group 9 Loss: 6.0508 +[2025-09-06 01:58:41] [Rank 0] Group 10 Loss: 6.0803 +[2025-09-06 01:58:41] [Rank 0] Group 10 Loss: 6.0803 +[2025-09-06 01:58:41] [Rank 0] Group 11 Loss: 6.1236 +[2025-09-06 01:58:41] [Rank 0] Group 11 Loss: 6.1236 +[2025-09-06 01:58:41] [Rank 0] Group 12 Loss: 5.9701 +[2025-09-06 01:58:41] [Rank 0] Group 12 Loss: 5.9701 +[2025-09-06 01:58:41] [Rank 0] Group 13 Loss: 5.9863 +[2025-09-06 01:58:41] [Rank 0] Group 13 Loss: 5.9863 +[2025-09-06 01:58:41] [Rank 0] Group 14 Loss: 6.0441 +[2025-09-06 01:58:41] [Rank 0] Group 14 Loss: 6.0441 +[2025-09-06 01:58:41] [Rank 0] Group 15 Loss: 5.9643 +[2025-09-06 01:58:41] [Rank 0] Group 15 Loss: 5.9643 +[2025-09-06 01:58:41] [Rank 0] Group 0 FTA: 0.8000 +[2025-09-06 01:58:41] [Rank 0] Group 0 FTA: 0.8000 +[2025-09-06 01:58:41] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 01:58:41] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 
01:58:41] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 01:58:41] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 01:58:41] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 01:58:41] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-06 01:58:41] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 01:58:41] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-06 01:58:41] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 01:58:41] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-06 01:58:41] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 01:58:41] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-06 01:58:41] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 01:58:41] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-06 01:58:41] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 01:58:41] [Rank 0] Group 8 FTA: 0.1500 +[2025-09-06 01:58:41] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 01:58:41] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-06 01:58:41] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 01:58:41] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-06 01:58:41] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 01:58:41] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-06 01:58:41] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:58:41] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-06 01:58:41] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 01:58:41] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-06 01:58:41] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 01:58:41] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-06 01:58:41] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-06 01:58:41] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-06 01:58:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png +[2025-09-06 01:58:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png +[2025-09-06 01:58:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png +[2025-09-06 01:58:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png +[2025-09-06 01:58:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png +[2025-09-06 01:58:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png +[2025-09-06 01:58:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png +[2025-09-06 01:58:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png +[2025-09-06 01:58:43] [Rank 0] step:2001/10000 train_time:105338ms step_avg:52.64ms +[2025-09-06 01:58:43] [Rank 0] step:2001/10000 train_time:105338ms step_avg:52.64ms +[2025-09-06 01:58:44] [Rank 0] step:2021/10000 train_time:106207ms step_avg:52.55ms +[2025-09-06 01:58:44] [Rank 0] step:2021/10000 train_time:106207ms step_avg:52.55ms +[2025-09-06 01:58:44] [Rank 0] step:2041/10000 train_time:106941ms step_avg:52.40ms +[2025-09-06 01:58:44] [Rank 0] step:2041/10000 train_time:106941ms step_avg:52.40ms +[2025-09-06 01:58:45] [Rank 0] step:2061/10000 train_time:107673ms step_avg:52.24ms +[2025-09-06 01:58:45] [Rank 0] step:2061/10000 train_time:107673ms step_avg:52.24ms +[2025-09-06 01:58:46] [Rank 0] 
step:2081/10000 train_time:108406ms step_avg:52.09ms +[2025-09-06 01:58:46] [Rank 0] step:2081/10000 train_time:108406ms step_avg:52.09ms +[2025-09-06 01:58:47] [Rank 0] step:2101/10000 train_time:109139ms step_avg:51.95ms +[2025-09-06 01:58:47] [Rank 0] step:2101/10000 train_time:109139ms step_avg:51.95ms +[2025-09-06 01:58:47] [Rank 0] step:2121/10000 train_time:109871ms step_avg:51.80ms +[2025-09-06 01:58:47] [Rank 0] step:2121/10000 train_time:109871ms step_avg:51.80ms +[2025-09-06 01:58:48] [Rank 0] step:2141/10000 train_time:110605ms step_avg:51.66ms +[2025-09-06 01:58:48] [Rank 0] step:2141/10000 train_time:110605ms step_avg:51.66ms +[2025-09-06 01:58:49] [Rank 0] step:2161/10000 train_time:111337ms step_avg:51.52ms +[2025-09-06 01:58:49] [Rank 0] step:2161/10000 train_time:111337ms step_avg:51.52ms +[2025-09-06 01:58:49] [Rank 0] step:2181/10000 train_time:112069ms step_avg:51.38ms +[2025-09-06 01:58:49] [Rank 0] step:2181/10000 train_time:112069ms step_avg:51.38ms +[2025-09-06 01:58:50] [Rank 0] step:2201/10000 train_time:112802ms step_avg:51.25ms +[2025-09-06 01:58:50] [Rank 0] step:2201/10000 train_time:112802ms step_avg:51.25ms +[2025-09-06 01:58:51] [Rank 0] step:2221/10000 train_time:113535ms step_avg:51.12ms +[2025-09-06 01:58:51] [Rank 0] step:2221/10000 train_time:113535ms step_avg:51.12ms +[2025-09-06 01:58:52] [Rank 0] step:2241/10000 train_time:114272ms step_avg:50.99ms +[2025-09-06 01:58:52] [Rank 0] step:2241/10000 train_time:114272ms step_avg:50.99ms +[2025-09-06 01:58:52] [Rank 0] step:2261/10000 train_time:115014ms step_avg:50.87ms +[2025-09-06 01:58:52] [Rank 0] step:2261/10000 train_time:115014ms step_avg:50.87ms +[2025-09-06 01:58:53] [Rank 0] step:2281/10000 train_time:115754ms step_avg:50.75ms +[2025-09-06 01:58:53] [Rank 0] step:2281/10000 train_time:115754ms step_avg:50.75ms +[2025-09-06 01:58:54] [Rank 0] step:2301/10000 train_time:116493ms step_avg:50.63ms +[2025-09-06 01:58:54] [Rank 0] step:2301/10000 train_time:116493ms step_avg:50.63ms +[2025-09-06 01:58:55] [Rank 0] step:2321/10000 train_time:117231ms step_avg:50.51ms +[2025-09-06 01:58:55] [Rank 0] step:2321/10000 train_time:117231ms step_avg:50.51ms +[2025-09-06 01:58:55] [Rank 0] step:2341/10000 train_time:117969ms step_avg:50.39ms +[2025-09-06 01:58:55] [Rank 0] step:2341/10000 train_time:117969ms step_avg:50.39ms +[2025-09-06 01:58:56] [Rank 0] step:2361/10000 train_time:118708ms step_avg:50.28ms +[2025-09-06 01:58:56] [Rank 0] step:2361/10000 train_time:118708ms step_avg:50.28ms +[2025-09-06 01:58:57] [Rank 0] step:2381/10000 train_time:119446ms step_avg:50.17ms +[2025-09-06 01:58:57] [Rank 0] step:2381/10000 train_time:119446ms step_avg:50.17ms +[2025-09-06 01:58:58] [Rank 0] step:2401/10000 train_time:120185ms step_avg:50.06ms +[2025-09-06 01:58:58] [Rank 0] step:2401/10000 train_time:120185ms step_avg:50.06ms +[2025-09-06 01:58:58] [Rank 0] step:2421/10000 train_time:120924ms step_avg:49.95ms +[2025-09-06 01:58:58] [Rank 0] step:2421/10000 train_time:120924ms step_avg:49.95ms +[2025-09-06 01:58:59] [Rank 0] step:2441/10000 train_time:121662ms step_avg:49.84ms +[2025-09-06 01:58:59] [Rank 0] step:2441/10000 train_time:121662ms step_avg:49.84ms +[2025-09-06 01:59:00] [Rank 0] step:2461/10000 train_time:122400ms step_avg:49.74ms +[2025-09-06 01:59:00] [Rank 0] step:2461/10000 train_time:122400ms step_avg:49.74ms +[2025-09-06 01:59:01] [Rank 0] step:2481/10000 train_time:123138ms step_avg:49.63ms +[2025-09-06 01:59:01] [Rank 0] step:2481/10000 train_time:123138ms step_avg:49.63ms +[2025-09-06 
01:59:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:59:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-06 01:59:02] [Rank 0] PRINT: step:2500/10000 train_loss:2.8494 val_loss:2.7597 train_time:124081ms step_avg:49.63ms +[2025-09-06 01:59:02] [Rank 0] PRINT: step:2500/10000 train_loss:2.8494 val_loss:2.7597 train_time:124081ms step_avg:49.63ms +[2025-09-06 01:59:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:59:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-06 01:59:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 01:59:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-06 02:00:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:00:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-06 02:00:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:00:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-06 02:00:23] [Rank 0] Total Loss: 5.1185 +[2025-09-06 02:00:23] [Rank 0] Total Loss: 5.1185 +[2025-09-06 02:00:23] [Rank 0] Total FTA (Unweighted): 0.1688 +[2025-09-06 02:00:23] [Rank 0] Total FTA (Unweighted): 0.1688 +[2025-09-06 02:00:23] [Rank 0] Total FTA (Weighted): 0.1688 +[2025-09-06 02:00:23] [Rank 0] Total FTA (Weighted): 0.1688 +[2025-09-06 02:00:23] [Rank 0] Group 0 Loss: 3.4081 +[2025-09-06 02:00:23] [Rank 0] Group 0 Loss: 3.4081 +[2025-09-06 02:00:23] [Rank 0] Group 1 Loss: 3.3200 +[2025-09-06 02:00:23] [Rank 0] Group 1 Loss: 3.3200 +[2025-09-06 02:00:23] [Rank 0] Group 2 Loss: 3.4194 +[2025-09-06 02:00:23] [Rank 0] Group 2 Loss: 3.4194 +[2025-09-06 02:00:23] [Rank 0] Group 3 Loss: 3.9836 +[2025-09-06 02:00:23] [Rank 0] Group 3 Loss: 3.9836 +[2025-09-06 02:00:23] [Rank 0] Group 4 Loss: 4.6077 +[2025-09-06 02:00:23] [Rank 0] Group 4 Loss: 4.6077 +[2025-09-06 02:00:23] [Rank 0] Group 5 Loss: 5.1372 +[2025-09-06 02:00:23] [Rank 0] Group 5 Loss: 5.1372 +[2025-09-06 02:00:23] [Rank 0] Group 6 Loss: 5.4062 +[2025-09-06 02:00:23] [Rank 0] Group 6 Loss: 5.4062 +[2025-09-06 02:00:23] [Rank 0] Group 7 Loss: 5.5282 +[2025-09-06 02:00:23] [Rank 0] Group 7 Loss: 5.5282 +[2025-09-06 02:00:23] [Rank 0] Group 8 Loss: 5.7790 +[2025-09-06 02:00:23] [Rank 0] Group 8 Loss: 5.7790 +[2025-09-06 02:00:23] [Rank 0] Group 9 Loss: 5.9205 +[2025-09-06 02:00:23] [Rank 0] Group 9 Loss: 5.9205 +[2025-09-06 02:00:23] [Rank 0] Group 10 Loss: 5.9541 +[2025-09-06 02:00:23] [Rank 0] Group 10 Loss: 5.9541 +[2025-09-06 02:00:23] [Rank 0] Group 11 Loss: 5.9920 +[2025-09-06 02:00:23] [Rank 0] Group 11 Loss: 5.9920 +[2025-09-06 02:00:23] [Rank 0] Group 12 Loss: 5.8277 +[2025-09-06 02:00:23] [Rank 0] Group 12 Loss: 5.8277 +[2025-09-06 02:00:23] [Rank 0] Group 13 Loss: 5.8415 +[2025-09-06 02:00:23] [Rank 0] Group 13 Loss: 5.8415 +[2025-09-06 02:00:23] [Rank 0] Group 14 Loss: 5.9276 +[2025-09-06 02:00:23] [Rank 0] Group 14 Loss: 5.9276 +[2025-09-06 02:00:23] [Rank 0] Group 15 Loss: 5.8441 +[2025-09-06 02:00:23] [Rank 0] Group 15 Loss: 5.8441 +[2025-09-06 02:00:23] [Rank 0] Group 0 FTA: 0.8000 +[2025-09-06 02:00:23] [Rank 0] Group 0 FTA: 0.8000 +[2025-09-06 02:00:23] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:00:23] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-06 02:00:23] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:00:23] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-06 02:00:23] 
+[2025-09-06 02:00:23] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-06 02:00:23] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-06 02:00:23] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 02:00:23] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 02:00:23] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-06 02:00:23] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-06 02:00:23] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 02:00:23] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 02:00:23] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 02:00:23] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-06 02:00:23] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 02:00:23] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 02:00:23] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 02:00:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:00:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:00:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:00:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:00:24] [Rank 0] step:2501/10000 train_time:124090ms step_avg:49.62ms
+[2025-09-06 02:00:25] [Rank 0] step:2521/10000 train_time:124770ms step_avg:49.49ms
+[2025-09-06 02:00:26] [Rank 0] step:2541/10000 train_time:125508ms step_avg:49.39ms
+[2025-09-06 02:00:27] [Rank 0] step:2561/10000 train_time:126247ms step_avg:49.30ms
+[2025-09-06 02:00:27] [Rank 0] step:2581/10000 train_time:126986ms step_avg:49.20ms
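[Editor's note] The recurring divisibility warning above is plain integer arithmetic: 491520 is not a multiple of 65536, so the last partial batch of validation tokens can be skipped. A minimal sketch of the check the script presumably performs (names are assumptions):

    val_tokens = 491520
    val_batch_size = 65536
    full_batches, leftover = divmod(val_tokens, val_batch_size)
    print(full_batches, leftover)  # 7 32768 -> 7 full batches; 32768 tokens potentially missed
    if leftover != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")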
+[2025-09-06 02:00:28] [Rank 0] step:2601/10000 train_time:127725ms step_avg:49.11ms
+[2025-09-06 02:00:29] [Rank 0] step:2621/10000 train_time:128463ms step_avg:49.01ms
+[2025-09-06 02:00:30] [Rank 0] step:2641/10000 train_time:129202ms step_avg:48.92ms
+[2025-09-06 02:00:30] [Rank 0] step:2661/10000 train_time:129940ms step_avg:48.83ms
+[2025-09-06 02:00:31] [Rank 0] step:2681/10000 train_time:130678ms step_avg:48.74ms
+[2025-09-06 02:00:32] [Rank 0] step:2701/10000 train_time:131416ms step_avg:48.65ms
+[2025-09-06 02:00:33] [Rank 0] step:2721/10000 train_time:132154ms step_avg:48.57ms
+[2025-09-06 02:00:33] [Rank 0] step:2741/10000 train_time:132893ms step_avg:48.48ms
+[2025-09-06 02:00:34] [Rank 0] step:2761/10000 train_time:133631ms step_avg:48.40ms
+[2025-09-06 02:00:35] [Rank 0] step:2781/10000 train_time:134369ms step_avg:48.32ms
+[2025-09-06 02:00:35] [Rank 0] step:2801/10000 train_time:135108ms step_avg:48.24ms
+[2025-09-06 02:00:37] [Rank 0] step:2821/10000 train_time:136464ms step_avg:48.37ms
+[2025-09-06 02:00:38] [Rank 0] step:2841/10000 train_time:137202ms step_avg:48.29ms
+[2025-09-06 02:00:38] [Rank 0] step:2861/10000 train_time:137941ms step_avg:48.21ms
+[2025-09-06 02:00:39] [Rank 0] step:2881/10000 train_time:138680ms step_avg:48.14ms
+[2025-09-06 02:00:40] [Rank 0] step:2901/10000 train_time:139422ms step_avg:48.06ms
+[2025-09-06 02:00:41] [Rank 0] step:2921/10000 train_time:140161ms step_avg:47.98ms
+[2025-09-06 02:00:41] [Rank 0] step:2941/10000 train_time:140899ms step_avg:47.91ms
+[2025-09-06 02:00:42] [Rank 0] step:2961/10000 train_time:141638ms step_avg:47.83ms
+[2025-09-06 02:00:43] [Rank 0] step:2981/10000 train_time:142377ms step_avg:47.76ms
+[2025-09-06 02:00:43] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:00:44] [Rank 0] PRINT: step:3000/10000 train_loss:2.6962 val_loss:2.6263 train_time:143197ms step_avg:47.73ms
+[2025-09-06 02:00:44] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:00:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:02:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:02:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:02:05] [Rank 0] Total Loss: 5.0436
+[2025-09-06 02:02:05] [Rank 0] Total FTA (Unweighted): 0.2062
+[2025-09-06 02:02:05] [Rank 0] Total FTA (Weighted): 0.2062
+[2025-09-06 02:02:05] [Rank 0] Group 0 Loss: 3.4011
+[2025-09-06 02:02:05] [Rank 0] Group 1 Loss: 3.3402
+[2025-09-06 02:02:05] [Rank 0] Group 2 Loss: 3.4367
+[2025-09-06 02:02:05] [Rank 0] Group 3 Loss: 3.9354
+[2025-09-06 02:02:05] [Rank 0] Group 4 Loss: 4.4910
+[2025-09-06 02:02:05] [Rank 0] Group 5 Loss: 5.0011
+[2025-09-06 02:02:05] [Rank 0] Group 6 Loss: 5.2783
+[2025-09-06 02:02:05] [Rank 0] Group 7 Loss: 5.4187
+[2025-09-06 02:02:05] [Rank 0] Group 8 Loss: 5.6952
+[2025-09-06 02:02:05] [Rank 0] Group 9 Loss: 5.8182
+[2025-09-06 02:02:05] [Rank 0] Group 10 Loss: 5.8491
+[2025-09-06 02:02:05] [Rank 0] Group 11 Loss: 5.8921
+[2025-09-06 02:02:05] [Rank 0] Group 12 Loss: 5.7578
+[2025-09-06 02:02:05] [Rank 0] Group 13 Loss: 5.7692
+[2025-09-06 02:02:05] [Rank 0] Group 14 Loss: 5.8471
+[2025-09-06 02:02:05] [Rank 0] Group 15 Loss: 5.7672
+[2025-09-06 02:02:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:02:05] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 02:02:05] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:02:05] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:02:05] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-06 02:02:05] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 02:02:05] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 02:02:05] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:02:05] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-06 02:02:05] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-06 02:02:05] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-06 02:02:05] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 02:02:05] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 02:02:05] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-06 02:02:05] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 02:02:05] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 02:02:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:02:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:02:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:02:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:02:06] [Rank 0] step:3001/10000 train_time:143206ms step_avg:47.72ms
+[2025-09-06 02:02:07] [Rank 0] step:3021/10000 train_time:143871ms step_avg:47.62ms
+[2025-09-06 02:02:08] [Rank 0] step:3041/10000 train_time:144609ms step_avg:47.55ms
+[2025-09-06 02:02:09] [Rank 0] step:3061/10000 train_time:145346ms step_avg:47.48ms
+[2025-09-06 02:02:09] [Rank 0] step:3081/10000 train_time:146084ms step_avg:47.41ms
+[2025-09-06 02:02:10] [Rank 0] step:3101/10000 train_time:146829ms step_avg:47.35ms
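[Editor's note] Total FTA (Unweighted) and (Weighted) are identical at every eval, e.g. 0.2062 at step 3000. That is exactly what you would expect if the 1600 fixed-eval samples are split evenly across the 16 groups (100 each), so the sample-weighted mean coincides with the plain mean of the per-group FTAs. A sketch with the step-3000 values (the even 100-per-group split is an assumption inferred from the two totals agreeing):

    ftas = [1.00, 0.52, 0.18, 0.17, 0.09, 0.18, 0.09, 0.10,
            0.20, 0.12, 0.13, 0.12, 0.09, 0.11, 0.12, 0.08]  # per-group FTA at step 3000
    counts = [100] * 16                                       # assumed even split of 1600 samples
    unweighted = sum(ftas) / len(ftas)
    weighted = sum(f * c for f, c in zip(ftas, counts)) / sum(counts)
    # both evaluate to ~0.2062, matching the logged totals;
    # the two numbers would diverge only for uneven group sizes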
+[2025-09-06 02:02:11] [Rank 0] step:3121/10000 train_time:147690ms step_avg:47.32ms
+[2025-09-06 02:02:12] [Rank 0] step:3141/10000 train_time:148429ms step_avg:47.26ms
+[2025-09-06 02:02:12] [Rank 0] step:3161/10000 train_time:149168ms step_avg:47.19ms
+[2025-09-06 02:02:13] [Rank 0] step:3181/10000 train_time:150029ms step_avg:47.16ms
+[2025-09-06 02:02:14] [Rank 0] step:3201/10000 train_time:150767ms step_avg:47.10ms
+[2025-09-06 02:02:15] [Rank 0] step:3221/10000 train_time:151506ms step_avg:47.04ms
+[2025-09-06 02:02:16] [Rank 0] step:3241/10000 train_time:152245ms step_avg:46.97ms
+[2025-09-06 02:02:16] [Rank 0] step:3261/10000 train_time:152992ms step_avg:46.92ms
+[2025-09-06 02:02:17] [Rank 0] step:3281/10000 train_time:153731ms step_avg:46.85ms
+[2025-09-06 02:02:18] [Rank 0] step:3301/10000 train_time:154470ms step_avg:46.79ms
+[2025-09-06 02:02:19] [Rank 0] step:3321/10000 train_time:155209ms step_avg:46.74ms
+[2025-09-06 02:02:19] [Rank 0] step:3341/10000 train_time:155948ms step_avg:46.68ms
+[2025-09-06 02:02:20] [Rank 0] step:3361/10000 train_time:156687ms step_avg:46.62ms
+[2025-09-06 02:02:21] [Rank 0] step:3381/10000 train_time:157425ms step_avg:46.56ms
+[2025-09-06 02:02:21] [Rank 0] step:3401/10000 train_time:158163ms step_avg:46.50ms
+[2025-09-06 02:02:22] [Rank 0] step:3421/10000 train_time:158901ms step_avg:46.45ms
+[2025-09-06 02:02:23] [Rank 0] step:3441/10000 train_time:159641ms step_avg:46.39ms
+[2025-09-06 02:02:24] [Rank 0] step:3461/10000 train_time:160380ms step_avg:46.34ms
+[2025-09-06 02:02:24] [Rank 0] step:3481/10000 train_time:161119ms step_avg:46.29ms
+[2025-09-06 02:02:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:02:26] [Rank 0] PRINT: step:3500/10000 train_loss:2.5874 val_loss:2.5393 train_time:161938ms step_avg:46.27ms
+[2025-09-06 02:02:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:02:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:03:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:03:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:03:46] [Rank 0] Total Loss: 4.9747
+[2025-09-06 02:03:46] [Rank 0] Total FTA (Unweighted): 0.2069
+[2025-09-06 02:03:46] [Rank 0] Total FTA (Weighted): 0.2069
+[2025-09-06 02:03:46] [Rank 0] Group 0 Loss: 3.4211
+[2025-09-06 02:03:46] [Rank 0] Group 1 Loss: 3.2156
+[2025-09-06 02:03:46] [Rank 0] Group 2 Loss: 3.4000
+[2025-09-06 02:03:46] [Rank 0] Group 3 Loss: 3.8882
+[2025-09-06 02:03:46] [Rank 0] Group 4 Loss: 4.3950
+[2025-09-06 02:03:46] [Rank 0] Group 5 Loss: 4.8909
+[2025-09-06 02:03:46] [Rank 0] Group 6 Loss: 5.1996
+[2025-09-06 02:03:46] [Rank 0] Group 7 Loss: 5.3230
+[2025-09-06 02:03:46] [Rank 0] Group 8 Loss: 5.6392
+[2025-09-06 02:03:46] [Rank 0] Group 9 Loss: 5.7457
+[2025-09-06 02:03:46] [Rank 0] Group 10 Loss: 5.7894
+[2025-09-06 02:03:46] [Rank 0] Group 11 Loss: 5.8354
+[2025-09-06 02:03:46] [Rank 0] Group 12 Loss: 5.6843
+[2025-09-06 02:03:46] [Rank 0] Group 13 Loss: 5.7032
+[2025-09-06 02:03:46] [Rank 0] Group 14 Loss: 5.7624
+[2025-09-06 02:03:46] [Rank 0] Group 15 Loss: 5.7030
+[2025-09-06 02:03:46] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:03:46] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-06 02:03:46] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:03:46] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:03:46] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-06 02:03:46] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-06 02:03:46] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-06 02:03:46] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:03:46] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-06 02:03:46] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-06 02:03:46] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-06 02:03:46] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-06 02:03:46] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 02:03:46] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-06 02:03:46] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 02:03:46] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 02:03:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:03:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:03:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:03:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:03:48] [Rank 0] step:3501/10000 train_time:161947ms step_avg:46.26ms
+[2025-09-06 02:03:49] [Rank 0] step:3521/10000 train_time:162618ms step_avg:46.19ms
+[2025-09-06 02:03:49] [Rank 0] step:3541/10000 train_time:163355ms step_avg:46.13ms
+[2025-09-06 02:03:50] [Rank 0] step:3561/10000 train_time:164093ms step_avg:46.08ms
+[2025-09-06 02:03:51] [Rank 0] step:3581/10000 train_time:164831ms step_avg:46.03ms
+[2025-09-06 02:03:52] [Rank 0] step:3601/10000 train_time:165570ms step_avg:45.98ms
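[Editor's note] The "[✓] ... curve updated and saved" lines indicate the script re-renders and overwrites the same four PNGs after every detailed eval. A minimal matplotlib sketch of that pattern (not the repo's actual plotting code; the history layout is an assumption):

    import matplotlib.pyplot as plt

    def update_per_class_loss_curves(history, out_path):
        # history: {group_id: [(step, loss), ...]}, accumulated across detailed evals
        fig, ax = plt.subplots()
        for gid, points in sorted(history.items()):
            steps, losses = zip(*points)
            ax.plot(steps, losses, label=f"Group {gid}")
        ax.set_xlabel("step")
        ax.set_ylabel("loss")
        ax.legend(fontsize=6, ncol=2)
        fig.savefig(out_path)  # overwrites the PNG in place each eval
        plt.close(fig)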
+[2025-09-06 02:03:52] [Rank 0] step:3621/10000 train_time:166307ms step_avg:45.93ms
+[2025-09-06 02:03:53] [Rank 0] step:3641/10000 train_time:167239ms step_avg:45.93ms
+[2025-09-06 02:03:54] [Rank 0] step:3661/10000 train_time:167977ms step_avg:45.88ms
+[2025-09-06 02:03:55] [Rank 0] step:3681/10000 train_time:168715ms step_avg:45.83ms
+[2025-09-06 02:03:55] [Rank 0] step:3701/10000 train_time:169454ms step_avg:45.79ms
+[2025-09-06 02:03:56] [Rank 0] step:3721/10000 train_time:170193ms step_avg:45.74ms
+[2025-09-06 02:03:57] [Rank 0] step:3741/10000 train_time:170931ms step_avg:45.69ms
+[2025-09-06 02:03:58] [Rank 0] step:3761/10000 train_time:171672ms step_avg:45.65ms
+[2025-09-06 02:03:58] [Rank 0] step:3781/10000 train_time:172411ms step_avg:45.60ms
+[2025-09-06 02:03:59] [Rank 0] step:3801/10000 train_time:173149ms step_avg:45.55ms
+[2025-09-06 02:04:00] [Rank 0] step:3821/10000 train_time:173886ms step_avg:45.51ms
+[2025-09-06 02:04:01] [Rank 0] step:3841/10000 train_time:174624ms step_avg:45.46ms
+[2025-09-06 02:04:01] [Rank 0] step:3861/10000 train_time:175362ms step_avg:45.42ms
+[2025-09-06 02:04:02] [Rank 0] step:3881/10000 train_time:176101ms step_avg:45.38ms
+[2025-09-06 02:04:03] [Rank 0] step:3901/10000 train_time:176840ms step_avg:45.33ms
+[2025-09-06 02:04:04] [Rank 0] step:3921/10000 train_time:177577ms step_avg:45.29ms
+[2025-09-06 02:04:04] [Rank 0] step:3941/10000 train_time:178316ms step_avg:45.25ms
+[2025-09-06 02:04:05] [Rank 0] step:3961/10000 train_time:179054ms step_avg:45.20ms
+[2025-09-06 02:04:06] [Rank 0] step:3981/10000 train_time:179792ms step_avg:45.16ms
+[2025-09-06 02:04:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:04:07] [Rank 0] PRINT: step:4000/10000 train_loss:2.5027 val_loss:2.4555 train_time:180610ms step_avg:45.15ms
+[2025-09-06 02:04:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:04:07] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:05:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:05:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:05:29] [Rank 0] Total Loss: 4.9070
+[2025-09-06 02:05:29] [Rank 0] Total FTA (Unweighted): 0.2275
+[2025-09-06 02:05:29] [Rank 0] Total FTA (Weighted): 0.2275
+[2025-09-06 02:05:29] [Rank 0] Group 0 Loss: 3.4024
+[2025-09-06 02:05:29] [Rank 0] Group 1 Loss: 3.2846
+[2025-09-06 02:05:29] [Rank 0] Group 2 Loss: 3.3751
+[2025-09-06 02:05:29] [Rank 0] Group 3 Loss: 3.8630
+[2025-09-06 02:05:29] [Rank 0] Group 4 Loss: 4.2854
+[2025-09-06 02:05:29] [Rank 0] Group 5 Loss: 4.7920
+[2025-09-06 02:05:29] [Rank 0] Group 6 Loss: 5.0869
+[2025-09-06 02:05:29] [Rank 0] Group 7 Loss: 5.2381
+[2025-09-06 02:05:29] [Rank 0] Group 8 Loss: 5.5516
+[2025-09-06 02:05:29] [Rank 0] Group 9 Loss: 5.6565
+[2025-09-06 02:05:29] [Rank 0] Group 10 Loss: 5.7183
+[2025-09-06 02:05:29] [Rank 0] Group 11 Loss: 5.7369
+[2025-09-06 02:05:29] [Rank 0] Group 12 Loss: 5.6061
+[2025-09-06 02:05:29] [Rank 0] Group 13 Loss: 5.6208
+[2025-09-06 02:05:29] [Rank 0] Group 14 Loss: 5.6719
+[2025-09-06 02:05:29] [Rank 0] Group 15 Loss: 5.6224
+[2025-09-06 02:05:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:05:29] [Rank 0] Group 1 FTA: 0.7000
+[2025-09-06 02:05:29] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:05:29] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:05:29] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-06 02:05:29] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 02:05:29] [Rank 0] Group 6 FTA: 0.1300
+[2025-09-06 02:05:29] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:05:29] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 02:05:29] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 02:05:29] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 02:05:29] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 02:05:29] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-06 02:05:29] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-06 02:05:29] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-06 02:05:29] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 02:05:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:05:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:05:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:05:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:05:30] [Rank 0] step:4001/10000 train_time:180619ms step_avg:45.14ms
+[2025-09-06 02:05:31] [Rank 0] step:4021/10000 train_time:181487ms step_avg:45.13ms
+[2025-09-06 02:05:32] [Rank 0] step:4041/10000 train_time:182225ms step_avg:45.09ms
+[2025-09-06 02:05:33] [Rank 0] step:4061/10000 train_time:182964ms step_avg:45.05ms
+[2025-09-06 02:05:33] [Rank 0] step:4081/10000 train_time:183702ms step_avg:45.01ms
+[2025-09-06 02:05:34] [Rank 0] step:4101/10000 train_time:184440ms step_avg:44.97ms
+[2025-09-06 02:05:35] [Rank 0] step:4121/10000 train_time:185177ms step_avg:44.93ms
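[Editor's note] The "Fixed-eval set loaded with 1600 samples" line suggests a frozen, per-group sample of the validation data reused at every detailed eval so the curves are comparable across steps. A sketch of loading such a set from a JSON mapping of group id to sample indices (the file name and layout here are assumptions):

    import json

    with open("fixed_eval_indices.json") as f:
        indices_by_group = json.load(f)  # e.g. {"0": [idx, ...], "1": [...], ...}

    total = sum(len(v) for v in indices_by_group.values())
    print(f"Fixed-eval set loaded with {total} samples.")  # 1600 = 16 groups x 100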
+[2025-09-06 02:05:36] [Rank 0] step:4141/10000 train_time:185914ms step_avg:44.90ms
+[2025-09-06 02:05:36] [Rank 0] step:4161/10000 train_time:186651ms step_avg:44.86ms
+[2025-09-06 02:05:37] [Rank 0] step:4181/10000 train_time:187388ms step_avg:44.82ms
+[2025-09-06 02:05:38] [Rank 0] step:4201/10000 train_time:188126ms step_avg:44.78ms
+[2025-09-06 02:05:39] [Rank 0] step:4221/10000 train_time:188863ms step_avg:44.74ms
+[2025-09-06 02:05:39] [Rank 0] step:4241/10000 train_time:189600ms step_avg:44.71ms
+[2025-09-06 02:05:40] [Rank 0] step:4261/10000 train_time:190337ms step_avg:44.67ms
+[2025-09-06 02:05:41] [Rank 0] step:4281/10000 train_time:191074ms step_avg:44.63ms
+[2025-09-06 02:05:41] [Rank 0] step:4301/10000 train_time:191812ms step_avg:44.60ms
+[2025-09-06 02:05:42] [Rank 0] step:4321/10000 train_time:192549ms step_avg:44.56ms
+[2025-09-06 02:05:43] [Rank 0] step:4341/10000 train_time:193286ms step_avg:44.53ms
+[2025-09-06 02:05:44] [Rank 0] step:4361/10000 train_time:194023ms step_avg:44.49ms
+[2025-09-06 02:05:44] [Rank 0] step:4381/10000 train_time:194761ms step_avg:44.46ms
+[2025-09-06 02:05:45] [Rank 0] step:4401/10000 train_time:195499ms step_avg:44.42ms
+[2025-09-06 02:05:46] [Rank 0] step:4421/10000 train_time:196237ms step_avg:44.39ms
+[2025-09-06 02:05:47] [Rank 0] step:4441/10000 train_time:196974ms step_avg:44.35ms
+[2025-09-06 02:05:47] [Rank 0] step:4461/10000 train_time:197712ms step_avg:44.32ms
+[2025-09-06 02:05:48] [Rank 0] step:4481/10000 train_time:198449ms step_avg:44.29ms
+[2025-09-06 02:05:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:05:49] [Rank 0] PRINT: step:4500/10000 train_loss:2.4345 val_loss:2.3947 train_time:199267ms step_avg:44.28ms
+[2025-09-06 02:05:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:05:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:07:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:07:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:07:10] [Rank 0] Total Loss: 4.8259
+[2025-09-06 02:07:10] [Rank 0] Total FTA (Unweighted): 0.2431
+[2025-09-06 02:07:10] [Rank 0] Total FTA (Weighted): 0.2431
+[2025-09-06 02:07:10] [Rank 0] Group 0 Loss: 3.3329
+[2025-09-06 02:07:10] [Rank 0] Group 1 Loss: 3.2781
+[2025-09-06 02:07:10] [Rank 0] Group 2 Loss: 3.3513
+[2025-09-06 02:07:10] [Rank 0] Group 3 Loss: 3.7945
+[2025-09-06 02:07:10] [Rank 0] Group 4 Loss: 4.1927
+[2025-09-06 02:07:10] [Rank 0] Group 5 Loss: 4.6857
+[2025-09-06 02:07:10] [Rank 0] Group 6 Loss: 4.9891
+[2025-09-06 02:07:10] [Rank 0] Group 7 Loss: 5.1437
+[2025-09-06 02:07:10] [Rank 0] Group 8 Loss: 5.4624
+[2025-09-06 02:07:10] [Rank 0] Group 9 Loss: 5.5593
+[2025-09-06 02:07:10] [Rank 0] Group 10 Loss: 5.5993
+[2025-09-06 02:07:10] [Rank 0] Group 11 Loss: 5.6340
+[2025-09-06 02:07:10] [Rank 0] Group 12 Loss: 5.5211
+[2025-09-06 02:07:10] [Rank 0] Group 13 Loss: 5.5365
+[2025-09-06 02:07:11] [Rank 0] Group 14 Loss: 5.5969
+[2025-09-06 02:07:11] [Rank 0] Group 15 Loss: 5.5368
+[2025-09-06 02:07:11] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:07:11] [Rank 0] Group 1 FTA: 0.8700
+[2025-09-06 02:07:11] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:07:11] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:07:11] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 02:07:11] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-06 02:07:11] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-06 02:07:11] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:07:11] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-06 02:07:11] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 02:07:11] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 02:07:11] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 02:07:11] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 02:07:11] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-06 02:07:11] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 02:07:11] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 02:07:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:07:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:07:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:07:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:07:12] [Rank 0] step:4501/10000 train_time:199278ms step_avg:44.27ms
+[2025-09-06 02:07:13] [Rank 0] step:4521/10000 train_time:199955ms step_avg:44.23ms
+[2025-09-06 02:07:14] [Rank 0] step:4541/10000 train_time:200693ms step_avg:44.20ms
+[2025-09-06 02:07:14] [Rank 0] step:4561/10000 train_time:201431ms step_avg:44.16ms
+[2025-09-06 02:07:15] [Rank 0] step:4581/10000 train_time:202169ms step_avg:44.13ms
+[2025-09-06 02:07:16] [Rank 0] step:4601/10000 train_time:202906ms step_avg:44.10ms
+[2025-09-06 02:07:17] [Rank 0] step:4621/10000 train_time:203645ms step_avg:44.07ms
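[Editor's note] For downstream analysis, the periodic PRINT summary lines are easy to scrape. A small sketch (the log file name is hypothetical) that extracts the (step, train_loss, val_loss) triples:

    import re

    pat = re.compile(r"step:(\d+)/10000 train_loss:([\d.]+) val_loss:([\d.]+)")
    with open("training_log.txt") as f:
        points = [(int(s), float(tr), float(vl)) for s, tr, vl in pat.findall(f.read())]
    # e.g. [(2500, 2.8494, 2.7597), (3000, 2.6962, 2.6263), (3500, 2.5874, 2.5393), ...]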
+[2025-09-06 02:07:17] [Rank 0] step:4641/10000 train_time:204383ms step_avg:44.04ms
+[2025-09-06 02:07:18] [Rank 0] step:4661/10000 train_time:205121ms step_avg:44.01ms
+[2025-09-06 02:07:19] [Rank 0] step:4681/10000 train_time:205859ms step_avg:43.98ms
+[2025-09-06 02:07:19] [Rank 0] step:4701/10000 train_time:206597ms step_avg:43.95ms
+[2025-09-06 02:07:20] [Rank 0] step:4721/10000 train_time:207336ms step_avg:43.92ms
+[2025-09-06 02:07:21] [Rank 0] step:4741/10000 train_time:208076ms step_avg:43.89ms
+[2025-09-06 02:07:22] [Rank 0] step:4761/10000 train_time:208815ms step_avg:43.86ms
+[2025-09-06 02:07:22] [Rank 0] step:4781/10000 train_time:209553ms step_avg:43.83ms
+[2025-09-06 02:07:23] [Rank 0] step:4801/10000 train_time:210292ms step_avg:43.80ms
+[2025-09-06 02:07:24] [Rank 0] step:4821/10000 train_time:211031ms step_avg:43.77ms
+[2025-09-06 02:07:25] [Rank 0] step:4841/10000 train_time:212080ms step_avg:43.81ms
+[2025-09-06 02:07:26] [Rank 0] step:4861/10000 train_time:212818ms step_avg:43.78ms
+[2025-09-06 02:07:26] [Rank 0] step:4881/10000 train_time:213557ms step_avg:43.75ms
+[2025-09-06 02:07:27] [Rank 0] step:4901/10000 train_time:214296ms step_avg:43.73ms
+[2025-09-06 02:07:28] [Rank 0] step:4921/10000 train_time:215151ms step_avg:43.72ms
+[2025-09-06 02:07:29] [Rank 0] step:4941/10000 train_time:215890ms step_avg:43.69ms
+[2025-09-06 02:07:29] [Rank 0] step:4961/10000 train_time:216629ms step_avg:43.67ms
+[2025-09-06 02:07:30] [Rank 0] step:4981/10000 train_time:217518ms step_avg:43.67ms
+[2025-09-06 02:07:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:07:32] [Rank 0] PRINT: step:5000/10000 train_loss:2.3821 val_loss:2.3521 train_time:218337ms step_avg:43.67ms
+[2025-09-06 02:07:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:07:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:08:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:08:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:08:53] [Rank 0] Total Loss: 4.8463
+[2025-09-06 02:08:53] [Rank 0] Total FTA (Unweighted): 0.2506
+[2025-09-06 02:08:53] [Rank 0] Total FTA (Weighted): 0.2506
+[2025-09-06 02:08:53] [Rank 0] Group 0 Loss: 3.3766
+[2025-09-06 02:08:53] [Rank 0] Group 1 Loss: 3.3041
+[2025-09-06 02:08:53] [Rank 0] Group 2 Loss: 3.3428
+[2025-09-06 02:08:53] [Rank 0] Group 3 Loss: 3.8558
+[2025-09-06 02:08:53] [Rank 0] Group 4 Loss: 4.2036
+[2025-09-06 02:08:53] [Rank 0] Group 5 Loss: 4.7139
+[2025-09-06 02:08:53] [Rank 0] Group 6 Loss: 4.9972
+[2025-09-06 02:08:53] [Rank 0] Group 7 Loss: 5.1453
+[2025-09-06 02:08:53] [Rank 0] Group 8 Loss: 5.4724
+[2025-09-06 02:08:53] [Rank 0] Group 9 Loss: 5.5837
+[2025-09-06 02:08:53] [Rank 0] Group 10 Loss: 5.6362
+[2025-09-06 02:08:53] [Rank 0] Group 11 Loss: 5.6580
+[2025-09-06 02:08:53] [Rank 0] Group 12 Loss: 5.5465
+[2025-09-06 02:08:53] [Rank 0] Group 13 Loss: 5.5486
+[2025-09-06 02:08:53] [Rank 0] Group 14 Loss: 5.6113
+[2025-09-06 02:08:53] [Rank 0] Group 15 Loss: 5.5449
+[2025-09-06 02:08:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:08:53] [Rank 0] Group 1 FTA: 0.8700
+[2025-09-06 02:08:53] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:08:53] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:08:53] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-06 02:08:53] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-06 02:08:53] [Rank 0] Group 6 FTA: 0.1700
+[2025-09-06 02:08:53] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:08:53] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 02:08:53] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 02:08:53] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 02:08:53] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-06 02:08:53] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-06 02:08:53] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-06 02:08:53] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-06 02:08:53] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 02:08:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:08:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:08:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:08:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:08:54] [Rank 0] step:5001/10000 train_time:218346ms step_avg:43.66ms
+[2025-09-06 02:08:55] [Rank 0] step:5021/10000 train_time:219027ms step_avg:43.62ms
+[2025-09-06 02:08:56] [Rank 0] step:5041/10000 train_time:219764ms step_avg:43.60ms
+[2025-09-06 02:08:57] [Rank 0] step:5061/10000 train_time:220502ms step_avg:43.57ms
+[2025-09-06 02:08:57] [Rank 0] step:5081/10000 train_time:221241ms step_avg:43.54ms
+[2025-09-06 02:08:58] [Rank 0] step:5101/10000 train_time:221979ms step_avg:43.52ms
+[2025-09-06 02:08:59] [Rank 0] step:5121/10000 train_time:222717ms step_avg:43.49ms
+[2025-09-06 02:09:00] [Rank 0] step:5141/10000 train_time:223455ms step_avg:43.47ms
+[2025-09-06 02:09:00] [Rank 0] step:5161/10000 train_time:224194ms step_avg:43.44ms
+[2025-09-06 02:09:01] [Rank 0] step:5181/10000 train_time:224932ms step_avg:43.41ms
+[2025-09-06 02:09:02] [Rank 0] step:5201/10000 train_time:225671ms step_avg:43.39ms
+[2025-09-06 02:09:03] [Rank 0] step:5221/10000 train_time:226411ms step_avg:43.37ms
+[2025-09-06 02:09:03] [Rank 0] step:5241/10000 train_time:227151ms step_avg:43.34ms
+[2025-09-06 02:09:04] [Rank 0] step:5261/10000 train_time:227890ms step_avg:43.32ms
+[2025-09-06 02:09:05] [Rank 0] step:5281/10000 train_time:228630ms step_avg:43.29ms
+[2025-09-06 02:09:05] [Rank 0] step:5301/10000 train_time:229369ms step_avg:43.27ms
+[2025-09-06 02:09:06] [Rank 0] step:5321/10000 train_time:230107ms step_avg:43.25ms
+[2025-09-06 02:09:07] [Rank 0] step:5341/10000 train_time:230845ms step_avg:43.22ms
+[2025-09-06 02:09:08] [Rank 0] step:5361/10000 train_time:231583ms step_avg:43.20ms
+[2025-09-06 02:09:08] [Rank 0] step:5381/10000 train_time:232322ms step_avg:43.17ms
+[2025-09-06 02:09:09] [Rank 0] step:5401/10000 train_time:233061ms step_avg:43.15ms
+[2025-09-06 02:09:10] [Rank 0] step:5421/10000 train_time:233799ms step_avg:43.13ms
+[2025-09-06 02:09:11] [Rank 0] step:5441/10000 train_time:234537ms step_avg:43.11ms
+[2025-09-06 02:09:11] [Rank 0] step:5461/10000 train_time:235276ms step_avg:43.08ms
+[2025-09-06 02:09:12] [Rank 0] step:5481/10000 train_time:236015ms step_avg:43.06ms
+[2025-09-06 02:09:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:09:13] [Rank 0] PRINT: step:5500/10000 train_loss:2.3399 val_loss:2.3103 train_time:236834ms step_avg:43.06ms
+[2025-09-06 02:09:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:09:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:10:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:10:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:10:34] [Rank 0] Total Loss: 4.7148
+[2025-09-06 02:10:34] [Rank 0] Total FTA (Unweighted): 0.2575
+[2025-09-06 02:10:34] [Rank 0] Total FTA (Weighted): 0.2575
+[2025-09-06 02:10:34] [Rank 0] Group 0 Loss: 3.2343
+[2025-09-06 02:10:34] [Rank 0] Group 1 Loss: 3.1632
+[2025-09-06 02:10:34] [Rank 0] Group 2 Loss: 3.2795
+[2025-09-06 02:10:34] [Rank 0] Group 3 Loss: 3.7475
+[2025-09-06 02:10:34] [Rank 0] Group 4 Loss: 4.0607
+[2025-09-06 02:10:34] [Rank 0] Group 5 Loss: 4.5563
+[2025-09-06 02:10:34] [Rank 0] Group 6 Loss: 4.8527
+[2025-09-06 02:10:34] [Rank 0] Group 7 Loss: 5.0107
+[2025-09-06 02:10:34] [Rank 0] Group 8 Loss: 5.3195
+[2025-09-06 02:10:34] [Rank 0] Group 9 Loss: 5.4620
+[2025-09-06 02:10:34] [Rank 0] Group 10 Loss: 5.4818
+[2025-09-06 02:10:34] [Rank 0] Group 11 Loss: 5.5157
+[2025-09-06 02:10:34] [Rank 0] Group 12 Loss: 5.4189
+[2025-09-06 02:10:34] [Rank 0] Group 13 Loss: 5.4342
+[2025-09-06 02:10:34] [Rank 0] Group 14 Loss: 5.4812
+[2025-09-06 02:10:34] [Rank 0] Group 15 Loss: 5.4189
+[2025-09-06 02:10:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:10:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:10:34] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:10:34] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:10:34] [Rank 0] Group 4 FTA: 0.2000
+[2025-09-06 02:10:34] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-06 02:10:34] [Rank 0] Group 6 FTA: 0.1600
+[2025-09-06 02:10:35] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-06 02:10:35] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 02:10:35] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-06 02:10:35] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-06 02:10:35] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-06 02:10:35] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 02:10:35] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-06 02:10:35] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 02:10:35] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-06 02:10:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:10:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:10:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:10:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:10:36] [Rank 0] step:5501/10000 train_time:236843ms step_avg:43.05ms
+[2025-09-06 02:10:37] [Rank 0] step:5521/10000 train_time:237517ms step_avg:43.02ms
+[2025-09-06 02:10:38] [Rank 0] step:5541/10000 train_time:238391ms step_avg:43.02ms
+[2025-09-06 02:10:38] [Rank 0] step:5561/10000 train_time:239129ms step_avg:43.00ms
+[2025-09-06 02:10:39] [Rank 0] step:5581/10000 train_time:239868ms step_avg:42.98ms
+[2025-09-06 02:10:40] [Rank 0] step:5601/10000 train_time:240750ms step_avg:42.98ms
+[2025-09-06 02:10:41] [Rank 0] step:5621/10000 train_time:241489ms step_avg:42.96ms
+[2025-09-06 02:10:42] [Rank 0] step:5641/10000 train_time:242840ms step_avg:43.05ms
+[2025-09-06 02:10:43] [Rank 0] step:5661/10000 train_time:243579ms step_avg:43.03ms
+[2025-09-06 02:10:43] [Rank 0] step:5681/10000 train_time:244317ms step_avg:43.01ms
+[2025-09-06 02:10:44] [Rank 0] step:5701/10000 train_time:245057ms step_avg:42.98ms
+[2025-09-06 02:10:45] [Rank 0] step:5721/10000 train_time:245796ms step_avg:42.96ms
+[2025-09-06 02:10:46] [Rank 0] step:5741/10000 train_time:246536ms step_avg:42.94ms
+[2025-09-06 02:10:46] [Rank 0] step:5761/10000 train_time:247275ms step_avg:42.92ms
+[2025-09-06 02:10:47] [Rank 0] step:5781/10000 train_time:248014ms step_avg:42.90ms
+[2025-09-06 02:10:48] [Rank 0] step:5801/10000 train_time:248753ms step_avg:42.88ms
+[2025-09-06 02:10:49] [Rank 0] step:5821/10000 train_time:249491ms step_avg:42.86ms
+[2025-09-06 02:10:49] [Rank 0] step:5841/10000 train_time:250230ms step_avg:42.84ms
+[2025-09-06 02:10:50] [Rank 0] step:5861/10000 train_time:250969ms step_avg:42.82ms
+[2025-09-06 02:10:51] [Rank 0] step:5881/10000 train_time:251708ms step_avg:42.80ms
+[2025-09-06 02:10:52] [Rank 0] step:5901/10000 train_time:252446ms step_avg:42.78ms
+[2025-09-06 02:10:52] [Rank 0] step:5921/10000 train_time:253184ms step_avg:42.76ms
+[2025-09-06 02:10:53] [Rank 0] step:5941/10000 train_time:253922ms step_avg:42.74ms
+[2025-09-06 02:10:54] [Rank 0] step:5961/10000 train_time:254661ms step_avg:42.72ms
+[2025-09-06 02:10:55] [Rank 0] step:5981/10000 train_time:255400ms step_avg:42.70ms
+[2025-09-06 02:10:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:10:56] [Rank 0] PRINT: step:6000/10000 train_loss:2.3024 val_loss:2.2809 train_time:256220ms step_avg:42.70ms
+[2025-09-06 02:10:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:10:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:12:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:12:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:12:17] [Rank 0] Total Loss: 4.7192
+[2025-09-06 02:12:17] [Rank 0] Total FTA (Unweighted): 0.2737
+[2025-09-06 02:12:17] [Rank 0] Total FTA (Weighted): 0.2737
+[2025-09-06 02:12:17] [Rank 0] Group 0 Loss: 3.3025
+[2025-09-06 02:12:17] [Rank 0] Group 1 Loss: 3.2584
+[2025-09-06 02:12:17] [Rank 0] Group 2 Loss: 3.2539
+[2025-09-06 02:12:17] [Rank 0] Group 3 Loss: 3.7477
+[2025-09-06 02:12:17] [Rank 0] Group 4 Loss: 4.0501
+[2025-09-06 02:12:17] [Rank 0] Group 5 Loss: 4.5444
+[2025-09-06 02:12:17] [Rank 0] Group 6 Loss: 4.8539
+[2025-09-06 02:12:17] [Rank 0] Group 7 Loss: 5.0062
+[2025-09-06 02:12:17] [Rank 0] Group 8 Loss: 5.3165
+[2025-09-06 02:12:17] [Rank 0] Group 9 Loss: 5.4373
+[2025-09-06 02:12:17] [Rank 0] Group 10 Loss: 5.4864
+[2025-09-06 02:12:17] [Rank 0] Group 11 Loss: 5.5295
+[2025-09-06 02:12:17] [Rank 0] Group 12 Loss: 5.4001
+[2025-09-06 02:12:17] [Rank 0] Group 13 Loss: 5.4248
+[2025-09-06 02:12:17] [Rank 0] Group 14 Loss: 5.4845
+[2025-09-06 02:12:17] [Rank 0] Group 15 Loss: 5.4108
+[2025-09-06 02:12:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:12:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:12:17] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 02:12:17] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:12:17] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 02:12:17] [Rank 0] Group 5 FTA: 0.2200
+[2025-09-06 02:12:17] [Rank 0] Group 6 FTA: 0.1900
+[2025-09-06 02:12:17] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 02:12:17] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 02:12:17] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:12:17] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-06 02:12:17] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-06 02:12:17] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-06 02:12:17] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-06 02:12:17] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 02:12:17] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 02:12:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:12:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:12:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:12:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:12:18] [Rank 0] step:6001/10000 train_time:256229ms step_avg:42.70ms
+[2025-09-06 02:12:20] [Rank 0] step:6021/10000 train_time:257504ms step_avg:42.77ms
+[2025-09-06 02:12:21] [Rank 0] step:6041/10000 train_time:258243ms step_avg:42.75ms
+[2025-09-06 02:12:21] [Rank 0] step:6061/10000 train_time:258981ms step_avg:42.73ms
+[2025-09-06 02:12:22] [Rank 0] step:6081/10000 train_time:259721ms step_avg:42.71ms
+[2025-09-06 02:12:23] [Rank 0] step:6101/10000 train_time:260459ms step_avg:42.69ms
+[2025-09-06 02:12:24] [Rank 0] step:6121/10000 train_time:261197ms step_avg:42.67ms
+[2025-09-06 02:12:24] [Rank 0] step:6141/10000 train_time:261936ms step_avg:42.65ms
+[2025-09-06 02:12:25] [Rank 0] step:6161/10000 train_time:262674ms step_avg:42.64ms
+[2025-09-06 02:12:26] [Rank 0] step:6181/10000 train_time:263413ms step_avg:42.62ms
+[2025-09-06 02:12:26] [Rank 0] step:6201/10000 train_time:264153ms step_avg:42.60ms
+[2025-09-06 02:12:27] [Rank 0] step:6221/10000 train_time:264891ms step_avg:42.58ms
+[2025-09-06 02:12:28] [Rank 0] step:6241/10000 train_time:265629ms step_avg:42.56ms
+[2025-09-06 02:12:29] [Rank 0] step:6261/10000 train_time:266368ms step_avg:42.54ms
+[2025-09-06 02:12:29] [Rank 0] step:6281/10000 train_time:267106ms step_avg:42.53ms
+[2025-09-06 02:12:30] [Rank 0] step:6301/10000 train_time:267845ms step_avg:42.51ms
+[2025-09-06 02:12:31] [Rank 0] step:6321/10000 train_time:268583ms step_avg:42.49ms
+[2025-09-06 02:12:32] [Rank 0] step:6341/10000 train_time:269322ms step_avg:42.47ms
+[2025-09-06 02:12:32] [Rank 0] step:6361/10000 train_time:270061ms step_avg:42.46ms
+[2025-09-06 02:12:33] [Rank 0] step:6381/10000 train_time:270801ms step_avg:42.44ms
+[2025-09-06 02:12:34] [Rank 0] step:6401/10000 train_time:271541ms step_avg:42.42ms
+[2025-09-06 02:12:35] [Rank 0] step:6421/10000 train_time:272281ms step_avg:42.40ms
+[2025-09-06 02:12:35] [Rank 0] step:6441/10000 train_time:273020ms step_avg:42.39ms
+[2025-09-06 02:12:36] [Rank 0] step:6461/10000 train_time:273759ms step_avg:42.37ms
+[2025-09-06 02:12:37] [Rank 0] step:6481/10000 train_time:274498ms step_avg:42.35ms
+[2025-09-06 02:12:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:12:38] [Rank 0] PRINT: step:6500/10000 train_loss:2.2742 val_loss:2.2510 train_time:275318ms step_avg:42.36ms
+[2025-09-06 02:12:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:12:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:14:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:14:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:14:00] [Rank 0] Total Loss: 4.7333
+[2025-09-06 02:14:00] [Rank 0] Total FTA (Unweighted): 0.2800
+[2025-09-06 02:14:00] [Rank 0] Total FTA (Weighted): 0.2800
+[2025-09-06 02:14:00] [Rank 0] Group 0 Loss: 3.3414
+[2025-09-06 02:14:00] [Rank 0] Group 1 Loss: 3.3015
+[2025-09-06 02:14:00] [Rank 0] Group 2 Loss: 3.2917
+[2025-09-06 02:14:00] [Rank 0] Group 3 Loss: 3.8108
+[2025-09-06 02:14:00] [Rank 0] Group 4 Loss: 4.0735
+[2025-09-06 02:14:00] [Rank 0] Group 5 Loss: 4.5459
+[2025-09-06 02:14:00] [Rank 0] Group 6 Loss: 4.8541
+[2025-09-06 02:14:00] [Rank 0] Group 7 Loss: 4.9996
+[2025-09-06 02:14:00] [Rank 0] Group 8 Loss: 5.3270
+[2025-09-06 02:14:00] [Rank 0] Group 9 Loss: 5.4378
+[2025-09-06 02:14:00] [Rank 0] Group 10 Loss: 5.5073
+[2025-09-06 02:14:00] [Rank 0] Group 11 Loss: 5.5282
+[2025-09-06 02:14:00] [Rank 0] Group 12 Loss: 5.4115
+[2025-09-06 02:14:00] [Rank 0] Group 13 Loss: 5.4238
+[2025-09-06 02:14:00] [Rank 0] Group 14 Loss: 5.4693
+[2025-09-06 02:14:00] [Rank 0] Group 15 Loss: 5.4093
+[2025-09-06 02:14:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:14:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:14:00] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 02:14:00] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:14:00] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 02:14:00] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-06 02:14:00] [Rank 0] Group 6 FTA: 0.2200
+[2025-09-06 02:14:00] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 02:14:00] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-06 02:14:00] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:14:00] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 02:14:00] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-06 02:14:00] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-06 02:14:00] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-06 02:14:00] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-06 02:14:00] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-06 02:14:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:14:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:14:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:14:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:14:01] [Rank 0] step:6501/10000 train_time:275327ms step_avg:42.35ms
+[2025-09-06 02:14:02] [Rank 0] step:6521/10000 train_time:276014ms step_avg:42.33ms
+[2025-09-06 02:14:03] [Rank 0] step:6541/10000 train_time:276753ms step_avg:42.31ms
+[2025-09-06 02:14:04] [Rank 0] step:6561/10000 train_time:277491ms step_avg:42.29ms
+[2025-09-06 02:14:04] [Rank 0] step:6581/10000 train_time:278230ms step_avg:42.28ms
+[2025-09-06 02:14:05] [Rank 0] step:6601/10000 train_time:278970ms step_avg:42.26ms
+[2025-09-06 02:14:06] [Rank 0] step:6621/10000 train_time:279709ms step_avg:42.25ms
+[2025-09-06 02:14:07] [Rank 0] step:6641/10000 train_time:280449ms step_avg:42.23ms
+[2025-09-06 02:14:07] [Rank 0] step:6661/10000 train_time:281188ms step_avg:42.21ms
+[2025-09-06 02:14:08] [Rank 0] step:6681/10000 train_time:281926ms step_avg:42.20ms
+[2025-09-06 02:14:09] [Rank 0] step:6701/10000 train_time:282665ms step_avg:42.18ms
+[2025-09-06 02:14:10] [Rank 0] step:6721/10000 train_time:283404ms step_avg:42.17ms
+[2025-09-06 02:14:10] [Rank 0] step:6741/10000 train_time:284143ms step_avg:42.15ms
+[2025-09-06 02:14:11] [Rank 0] step:6761/10000 train_time:284881ms step_avg:42.14ms
+[2025-09-06 02:14:12] [Rank 0] step:6781/10000 train_time:285620ms step_avg:42.12ms
+[2025-09-06 02:14:13] [Rank 0] step:6801/10000 train_time:286358ms step_avg:42.11ms
+[2025-09-06 02:14:13] [Rank 0] step:6821/10000 train_time:287096ms step_avg:42.09ms
+[2025-09-06 02:14:15] [Rank 0] step:6841/10000 train_time:288457ms step_avg:42.17ms
+[2025-09-06 02:14:15] [Rank 0] step:6861/10000 train_time:289195ms step_avg:42.15ms
+[2025-09-06 02:14:16] [Rank 0] step:6881/10000 train_time:289933ms step_avg:42.14ms
+[2025-09-06 02:14:17] [Rank 0] step:6901/10000 train_time:290672ms step_avg:42.12ms
+[2025-09-06 02:14:18] [Rank 0] step:6921/10000 train_time:291411ms step_avg:42.11ms
+[2025-09-06 02:14:18] [Rank 0] step:6941/10000 train_time:292149ms step_avg:42.09ms
+[2025-09-06 02:14:19] [Rank 0] step:6961/10000 train_time:292889ms step_avg:42.08ms
+[2025-09-06 02:14:20] [Rank 0] step:6981/10000 train_time:293629ms step_avg:42.06ms
+[2025-09-06 02:14:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:14:21] [Rank 0] PRINT: step:7000/10000 train_loss:2.2486 val_loss:2.2297 train_time:294448ms step_avg:42.06ms
+[2025-09-06 02:14:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:14:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:15:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:15:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:15:42] [Rank 0] Total Loss: 4.7425
+[2025-09-06 02:15:42] [Rank 0] Total FTA (Unweighted): 0.2862
+[2025-09-06 02:15:42] [Rank 0] Total FTA (Weighted): 0.2863
+[2025-09-06 02:15:42] [Rank 0] Group 0 Loss: 3.3866
+[2025-09-06 02:15:42] [Rank 0] Group 1 Loss: 3.2821
+[2025-09-06 02:15:42] [Rank 0] Group 2 Loss: 3.2978
+[2025-09-06 02:15:42] [Rank 0] Group 3 Loss: 3.7994
+[2025-09-06 02:15:42] [Rank 0] Group 4 Loss: 4.0937
+[2025-09-06 02:15:42] [Rank 0] Group 5 Loss: 4.5582
+[2025-09-06 02:15:42] [Rank 0] Group 6 Loss: 4.8473
+[2025-09-06 02:15:42] [Rank 0] Group 7 Loss: 5.0063
+[2025-09-06 02:15:42] [Rank 0] Group 8 Loss: 5.3314
+[2025-09-06 02:15:42] [Rank 0] Group 9 Loss: 5.4439
+[2025-09-06 02:15:42] [Rank 0] Group 10 Loss: 5.5202
+[2025-09-06 02:15:42] [Rank 0] Group 11 Loss: 5.5377
+[2025-09-06 02:15:42] [Rank 0] Group 12 Loss: 5.4223
+[2025-09-06 02:15:42] [Rank 0] Group 13 Loss: 5.4300
+[2025-09-06 02:15:42] [Rank 0] Group 14 Loss: 5.4924
+[2025-09-06 02:15:42] [Rank 0] Group 15 Loss: 5.4301
+[2025-09-06 02:15:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:15:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:15:43] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 02:15:43] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:15:43] [Rank 0] Group 4 FTA: 0.2300
+[2025-09-06 02:15:43] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-06 02:15:43] [Rank 0] Group 6 FTA: 0.2500
+[2025-09-06 02:15:43] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 02:15:43] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 02:15:43] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:15:43] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 02:15:43] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-06 02:15:43] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-06 02:15:43] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-06 02:15:43] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 02:15:43] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 02:15:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:15:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:15:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:15:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:15:44] [Rank 0] step:7001/10000 train_time:294457ms step_avg:42.06ms
+[2025-09-06 02:15:45] [Rank 0] step:7021/10000 train_time:295140ms step_avg:42.04ms
+[2025-09-06 02:15:45] [Rank 0] step:7041/10000 train_time:295879ms step_avg:42.02ms
+[2025-09-06 02:15:46] [Rank 0] step:7061/10000 train_time:296618ms step_avg:42.01ms
+[2025-09-06 02:15:47] [Rank 0] step:7081/10000 train_time:297357ms step_avg:41.99ms
+[2025-09-06 02:15:48] [Rank 0] step:7101/10000 train_time:298096ms step_avg:41.98ms
+[2025-09-06 02:15:48] [Rank 0] step:7121/10000 train_time:298835ms step_avg:41.97ms
+[2025-09-06 02:15:49] [Rank 0] step:7141/10000 train_time:299574ms step_avg:41.95ms
+[2025-09-06 02:15:50] [Rank 0] step:7161/10000 train_time:300313ms step_avg:41.94ms
+[2025-09-06 02:15:51] [Rank 0] step:7181/10000 train_time:301052ms step_avg:41.92ms
+[2025-09-06 02:15:51] [Rank 0] step:7201/10000 train_time:301790ms step_avg:41.91ms
+[2025-09-06 02:15:52] [Rank 0] step:7221/10000 train_time:302529ms step_avg:41.90ms
+[2025-09-06 02:15:53] [Rank 0] step:7241/10000 train_time:303269ms step_avg:41.88ms
+[2025-09-06 02:15:54] [Rank 0] step:7261/10000 train_time:304170ms step_avg:41.89ms
+[2025-09-06 02:15:55] [Rank 0] step:7281/10000 train_time:304909ms step_avg:41.88ms
+[2025-09-06 02:15:55] [Rank 0] step:7301/10000 train_time:305649ms step_avg:41.86ms
+[2025-09-06 02:15:56] [Rank 0] step:7321/10000 train_time:306515ms step_avg:41.87ms
+[2025-09-06 02:15:57] [Rank 0] step:7341/10000 train_time:307254ms step_avg:41.85ms
+[2025-09-06 02:15:58] [Rank 0] step:7361/10000 train_time:307992ms step_avg:41.84ms
+[2025-09-06 02:15:58] [Rank 0] step:7381/10000 train_time:308731ms step_avg:41.83ms
+[2025-09-06 02:15:59] [Rank 0] step:7401/10000 train_time:309470ms step_avg:41.81ms
+[2025-09-06 02:16:00] [Rank 0] step:7421/10000 train_time:310208ms step_avg:41.80ms
+[2025-09-06 02:16:01] [Rank 0] step:7441/10000 train_time:310946ms step_avg:41.79ms
+[2025-09-06 02:16:01] [Rank 0] step:7461/10000 train_time:311686ms step_avg:41.78ms
+[2025-09-06 02:16:02] [Rank 0] step:7481/10000 train_time:312424ms step_avg:41.76ms
+[2025-09-06 02:16:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:16:03] [Rank 0] PRINT: step:7500/10000 train_loss:2.2281 val_loss:2.2111 train_time:313244ms step_avg:41.77ms
+[2025-09-06 02:16:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:16:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:17:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:17:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:17:24] [Rank 0] Total Loss: 4.6716
+[2025-09-06 02:17:24] [Rank 0] Total FTA (Unweighted): 0.2887
+[2025-09-06 02:17:25] [Rank 0] Total FTA (Weighted): 0.2888
+[2025-09-06 02:17:25] [Rank 0] Group 0 Loss: 3.3331
+[2025-09-06 02:17:25] [Rank 0] Group 1 Loss: 3.2613
+[2025-09-06 02:17:25] [Rank 0] Group 2 Loss: 3.2511
+[2025-09-06 02:17:25] [Rank 0] Group 3 Loss: 3.7365
+[2025-09-06 02:17:25] [Rank 0] Group 4 Loss: 4.0344
+[2025-09-06 02:17:25] [Rank 0] Group 5 Loss: 4.4862
+[2025-09-06 02:17:25] [Rank 0] Group 6 Loss: 4.7699
+[2025-09-06 02:17:25] [Rank 0] Group 7 Loss: 4.9214
+[2025-09-06 02:17:25] [Rank 0] Group 8 Loss: 5.2405
+[2025-09-06 02:17:25] [Rank 0] Group 9 Loss: 5.3729
+[2025-09-06 02:17:25] [Rank 0] Group 10 Loss: 5.4390
+[2025-09-06 02:17:25] [Rank 0] Group 11 Loss: 5.4477
+[2025-09-06 02:17:25] [Rank 0] Group 12 Loss: 5.3325
+[2025-09-06 02:17:25] [Rank 0] Group 13 Loss: 5.3583
+[2025-09-06 02:17:25] [Rank 0] Group 14 Loss: 5.4096
+[2025-09-06 02:17:25] [Rank 0] Group 15 Loss: 5.3507
+[2025-09-06 02:17:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:17:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:17:25] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 02:17:25] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:17:25] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 02:17:25] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 02:17:25] [Rank 0] Group 6 FTA: 0.2500
+[2025-09-06 02:17:25] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 02:17:25] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-06 02:17:25] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:17:25] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 02:17:25] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-06 02:17:25] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-06 02:17:25] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-06 02:17:25] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-06 02:17:25] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-06 02:17:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:17:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:17:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:17:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:17:26] [Rank 0] step:7501/10000 train_time:313253ms step_avg:41.76ms
+[2025-09-06 02:17:27] [Rank 0] step:7521/10000 train_time:313935ms step_avg:41.74ms
+[2025-09-06 02:17:28] [Rank 0] step:7541/10000 train_time:314675ms step_avg:41.73ms
+[2025-09-06 02:17:28] [Rank 0] step:7561/10000 train_time:315414ms step_avg:41.72ms
+[2025-09-06 02:17:29] [Rank 0] step:7581/10000 train_time:316152ms step_avg:41.70ms
+[2025-09-06 02:17:30] [Rank 0] step:7601/10000 train_time:316891ms step_avg:41.69ms
+[2025-09-06 02:17:31] [Rank 0] step:7621/10000 train_time:317631ms step_avg:41.68ms
+[2025-09-06 02:17:32] [Rank 0] step:7641/10000 train_time:318370ms step_avg:41.67ms
+[2025-09-06 02:17:32] [Rank 0] step:7661/10000 train_time:319304ms step_avg:41.68ms
+[2025-09-06 02:17:33] [Rank 0] step:7681/10000 train_time:320043ms step_avg:41.67ms
+[2025-09-06 02:17:34] [Rank 0] step:7701/10000 train_time:320780ms step_avg:41.65ms
+[2025-09-06 02:17:34] [Rank 0] step:7721/10000 train_time:321518ms step_avg:41.64ms
+[2025-09-06 02:17:35] [Rank 0] step:7741/10000 train_time:322256ms step_avg:41.63ms
+[2025-09-06 02:17:36] [Rank 0] step:7761/10000 train_time:322995ms step_avg:41.62ms
+[2025-09-06 02:17:37] [Rank 0] step:7781/10000 train_time:323734ms step_avg:41.61ms
+[2025-09-06 02:17:37] [Rank 0] step:7801/10000 train_time:324472ms step_avg:41.59ms
+[2025-09-06 02:17:38] [Rank 0] step:7821/10000 train_time:325211ms step_avg:41.58ms
+[2025-09-06 02:17:39] [Rank 0] step:7841/10000 train_time:325950ms step_avg:41.57ms
+[2025-09-06 02:17:40] [Rank 0] step:7861/10000 train_time:326689ms step_avg:41.56ms
+[2025-09-06 02:17:40] [Rank 0] step:7881/10000 train_time:327427ms step_avg:41.55ms
+[2025-09-06 02:17:41] [Rank 0] step:7901/10000 train_time:328165ms step_avg:41.53ms
+[2025-09-06 02:17:42] [Rank 0] step:7921/10000 train_time:328904ms step_avg:41.52ms
+[2025-09-06 02:17:43] [Rank 0] step:7941/10000 train_time:329642ms step_avg:41.51ms
+[2025-09-06 02:17:43] [Rank 0] step:7961/10000 train_time:330382ms step_avg:41.50ms
+[2025-09-06 02:17:44] [Rank 0] step:7981/10000 train_time:331120ms step_avg:41.49ms
+[2025-09-06 02:17:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-06 02:17:45] [Rank 0] PRINT: step:8000/10000 train_loss:2.2122 val_loss:2.1958 train_time:331939ms step_avg:41.49ms
+[2025-09-06 02:17:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:17:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:19:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:19:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:19:07] [Rank 0] Total Loss: 4.7004
+[2025-09-06 02:19:07] [Rank 0] Total FTA (Unweighted): 0.2781
+[2025-09-06 02:19:07] [Rank 0] Total FTA (Weighted): 0.2781
+[2025-09-06 02:19:07] [Rank 0] Group 0 Loss: 3.3642
+[2025-09-06 02:19:07] [Rank 0] Group 1 Loss: 3.2920
+[2025-09-06 02:19:07] [Rank 0] Group 2 Loss: 3.2993
+[2025-09-06 02:19:07] [Rank 0] Group 3 Loss: 3.7761
+[2025-09-06 02:19:07] [Rank 0] Group 4 Loss: 4.0534
+[2025-09-06 02:19:07] [Rank 0] Group 5 Loss: 4.5111
+[2025-09-06 02:19:07] [Rank 0] Group 6 Loss: 4.7948
+[2025-09-06 02:19:07] [Rank 0] Group 7 Loss: 4.9453
+[2025-09-06 02:19:07] [Rank 0] Group 8 Loss: 5.2706
+[2025-09-06 02:19:07] [Rank 0] Group 9 Loss: 5.3953
+[2025-09-06 02:19:07] [Rank 0] Group 10 Loss: 5.4772
+[2025-09-06 02:19:07] [Rank 0] Group 11 Loss: 5.4840
+[2025-09-06 02:19:07] [Rank 0] Group 12 Loss: 5.3522
+[2025-09-06 02:19:07] [Rank 0] Group 13 Loss: 5.3763
+[2025-09-06 02:19:07] [Rank 0] Group 14 Loss: 5.4383
+[2025-09-06 02:19:07] [Rank 0] Group 15 Loss: 5.3757
+[2025-09-06 02:19:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:19:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:19:07] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-06 02:19:07] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:19:07] [Rank 0] Group 4 FTA: 0.2300
+[2025-09-06 02:19:07] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-06 02:19:07] [Rank 0] Group 6 FTA: 0.2500
+[2025-09-06 02:19:07] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 02:19:07] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 02:19:07] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:19:07] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-06 02:19:07] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-06 02:19:07] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-06 02:19:07] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-06 02:19:07] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 02:19:07] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-06 02:19:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:19:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:19:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:19:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:19:08] [Rank 0] step:8001/10000 train_time:331948ms step_avg:41.49ms
+[2025-09-06 02:19:10] [Rank 0] step:8021/10000 train_time:333253ms step_avg:41.55ms
+[2025-09-06 02:19:11] [Rank 0] step:8041/10000 train_time:333991ms step_avg:41.54ms
+[2025-09-06 02:19:11] [Rank 0] step:8061/10000 train_time:334731ms step_avg:41.52ms
+[2025-09-06 02:19:12] [Rank 0] step:8081/10000 train_time:335469ms step_avg:41.51ms
+[2025-09-06 02:19:13] [Rank 0] step:8101/10000 train_time:336209ms step_avg:41.50ms
+[2025-09-06 02:19:13] [Rank 0] step:8121/10000 train_time:336948ms step_avg:41.49ms
+[2025-09-06 02:19:14] [Rank 0] step:8141/10000 train_time:337688ms step_avg:41.48ms
+[2025-09-06 02:19:15] [Rank 0] step:8161/10000 train_time:338426ms step_avg:41.47ms
+[2025-09-06 02:19:16] [Rank 0] step:8181/10000 train_time:339165ms step_avg:41.46ms
+[2025-09-06 02:19:16] [Rank 0] step:8201/10000 train_time:339904ms step_avg:41.45ms
+[2025-09-06 02:19:17] [Rank 0] step:8221/10000 train_time:340643ms step_avg:41.44ms
+[2025-09-06 02:19:18] [Rank 0] step:8241/10000 train_time:341382ms step_avg:41.42ms
+[2025-09-06 02:19:19] [Rank 0] step:8261/10000 train_time:342121ms step_avg:41.41ms
+[2025-09-06 02:19:19] [Rank 0] step:8281/10000 train_time:342860ms step_avg:41.40ms
+[2025-09-06 02:19:20] [Rank 0] step:8301/10000 train_time:343599ms step_avg:41.39ms
+[2025-09-06 02:19:21] [Rank 0] step:8321/10000 train_time:344338ms step_avg:41.38ms
+[2025-09-06 02:19:22] [Rank 0] step:8341/10000 train_time:345078ms step_avg:41.37ms
+[2025-09-06 02:19:22] [Rank 0] step:8361/10000 train_time:345817ms step_avg:41.36ms
+[2025-09-06 02:19:23] [Rank 0] step:8381/10000 train_time:346556ms step_avg:41.35ms
+[2025-09-06 02:19:24] [Rank 0] step:8401/10000 train_time:347294ms step_avg:41.34ms
+[2025-09-06 02:19:25] [Rank 0] step:8421/10000 train_time:348033ms step_avg:41.33ms
+[2025-09-06 02:19:25] [Rank 0] step:8441/10000 train_time:348771ms step_avg:41.32ms
+[2025-09-06 02:19:26] [Rank 0] step:8461/10000 train_time:349510ms step_avg:41.31ms
+[2025-09-06 02:19:27] [Rank 0] step:8481/10000 train_time:350249ms step_avg:41.30ms
+[2025-09-06 02:19:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
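The divisibility warning above recurs before every validation pass: 491520 validation tokens do not split evenly into 65536-token batches. Assuming the validation loop simply floor-divides the token budget into full batches, as the message suggests (the variable names below are illustrative, not taken from the script), the shortfall works out to half a batch:

    val_tokens = 491520        # hyperparameters.val_tokens in config.json
    val_batch_size = 65536     # value reported by the warning
    full_batches = val_tokens // val_batch_size                 # 7
    tokens_missed = val_tokens - full_batches * val_batch_size  # 32768
    print(full_batches, tokens_missed)                          # 7 32768

So only 7 full batches (458752 tokens) would be evaluated and up to 32768 tokens skipped each time, which is why the log says some tokens might be missed.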
+[2025-09-06 02:19:28] [Rank 0] PRINT: step:8500/10000 train_loss:2.1982 val_loss:2.1821 train_time:351069ms step_avg:41.30ms
+[2025-09-06 02:19:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:19:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:20:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:20:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:20:50] [Rank 0] Total Loss: 4.7007
+[2025-09-06 02:20:50] [Rank 0] Total FTA (Unweighted): 0.2919
+[2025-09-06 02:20:50] [Rank 0] Total FTA (Weighted): 0.2919
+[2025-09-06 02:20:50] [Rank 0] Group 0 Loss: 3.3765
+[2025-09-06 02:20:50] [Rank 0] Group 1 Loss: 3.2911
+[2025-09-06 02:20:50] [Rank 0] Group 2 Loss: 3.3061
+[2025-09-06 02:20:50] [Rank 0] Group 3 Loss: 3.7540
+[2025-09-06 02:20:50] [Rank 0] Group 4 Loss: 4.0588
+[2025-09-06 02:20:50] [Rank 0] Group 5 Loss: 4.5236
+[2025-09-06 02:20:50] [Rank 0] Group 6 Loss: 4.7878
+[2025-09-06 02:20:50] [Rank 0] Group 7 Loss: 4.9454
+[2025-09-06 02:20:50] [Rank 0] Group 8 Loss: 5.2672
+[2025-09-06 02:20:50] [Rank 0] Group 9 Loss: 5.3844
+[2025-09-06 02:20:50] [Rank 0] Group 10 Loss: 5.4766
+[2025-09-06 02:20:50] [Rank 0] Group 11 Loss: 5.4868
+[2025-09-06 02:20:50] [Rank 0] Group 12 Loss: 5.3652
+[2025-09-06 02:20:50] [Rank 0] Group 13 Loss: 5.3685
+[2025-09-06 02:20:50] [Rank 0] Group 14 Loss: 5.4402
+[2025-09-06 02:20:50] [Rank 0] Group 15 Loss: 5.3794
+[2025-09-06 02:20:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:20:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:20:50] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 02:20:50] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:20:50] [Rank 0] Group 4 FTA: 0.2300
+[2025-09-06 02:20:50] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-06 02:20:50] [Rank 0] Group 6 FTA: 0.2500
+[2025-09-06 02:20:50] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 02:20:50] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 02:20:50] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:20:50] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-06 02:20:50] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-06 02:20:50] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-06 02:20:50] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-06 02:20:50] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-06 02:20:50] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 02:20:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:20:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:20:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:20:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:20:51] [Rank 0] step:8501/10000 train_time:351078ms step_avg:41.30ms
+[2025-09-06 02:20:52] [Rank 0] step:8521/10000 train_time:351747ms step_avg:41.28ms
+[2025-09-06 02:20:53] [Rank 0] step:8541/10000 train_time:352486ms step_avg:41.27ms
+[2025-09-06 02:20:53] [Rank 0] step:8561/10000 train_time:353225ms step_avg:41.26ms
+[2025-09-06 02:20:54] [Rank 0] step:8581/10000 train_time:353964ms step_avg:41.25ms
+[2025-09-06 02:20:55] [Rank 0] step:8601/10000 train_time:354704ms step_avg:41.24ms
+[2025-09-06 02:20:56] [Rank 0] step:8621/10000 train_time:355442ms step_avg:41.23ms
+[2025-09-06 02:20:56] [Rank 0] step:8641/10000 train_time:356181ms step_avg:41.22ms
+[2025-09-06 02:20:57] [Rank 0] step:8661/10000 train_time:356920ms step_avg:41.21ms
+[2025-09-06 02:20:58] [Rank 0] step:8681/10000 train_time:357659ms step_avg:41.20ms
+[2025-09-06 02:20:59] [Rank 0] step:8701/10000 train_time:358398ms step_avg:41.19ms
+[2025-09-06 02:20:59] [Rank 0] step:8721/10000 train_time:359137ms step_avg:41.18ms
+[2025-09-06 02:21:00] [Rank 0] step:8741/10000 train_time:359883ms step_avg:41.17ms
+[2025-09-06 02:21:01] [Rank 0] step:8761/10000 train_time:360623ms step_avg:41.16ms
+[2025-09-06 02:21:02] [Rank 0] step:8781/10000 train_time:361362ms step_avg:41.15ms
+[2025-09-06 02:21:02] [Rank 0] step:8801/10000 train_time:362101ms step_avg:41.14ms
+[2025-09-06 02:21:03] [Rank 0] step:8821/10000 train_time:362841ms step_avg:41.13ms
+[2025-09-06 02:21:04] [Rank 0] step:8841/10000 train_time:364200ms step_avg:41.19ms
+[2025-09-06 02:21:05] [Rank 0] step:8861/10000 train_time:364939ms step_avg:41.18ms
+[2025-09-06 02:21:06] [Rank 0] step:8881/10000 train_time:365677ms step_avg:41.18ms
+[2025-09-06 02:21:07] [Rank 0] step:8901/10000 train_time:366415ms step_avg:41.17ms
+[2025-09-06 02:21:07] [Rank 0] step:8921/10000 train_time:367153ms step_avg:41.16ms
+[2025-09-06 02:21:08] [Rank 0] step:8941/10000 train_time:367891ms step_avg:41.15ms
+[2025-09-06 02:21:09] [Rank 0] step:8961/10000 train_time:368629ms step_avg:41.14ms
+[2025-09-06 02:21:10] [Rank 0] step:8981/10000 train_time:369500ms step_avg:41.14ms
+[2025-09-06 02:21:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
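The logs never expand the abbreviation FTA, but the relationship between the per-group values and the totals can be verified directly: the fixed eval set has 1600 samples split into 16 groups of per_group_k = 100, so each group FTA is a multiple of 0.01, and with equally sized groups the unweighted and weighted totals must coincide, exactly as logged. A quick check in Python against the step-8500 block above:

    # Unweighted total FTA = plain mean of the 16 group FTAs (step 8500).
    group_fta = [1.00, 1.00, 0.31, 0.17, 0.23, 0.23, 0.25, 0.12,
                 0.22, 0.14, 0.18, 0.17, 0.17, 0.22, 0.15, 0.11]
    assert abs(sum(group_fta) / len(group_fta) - 0.2919) < 5e-5

The mean is 0.291875, which rounds to the logged 0.2919.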
+[2025-09-06 02:21:11] [Rank 0] PRINT: step:9000/10000 train_loss:2.1844 val_loss:2.1710 train_time:370319ms step_avg:41.15ms
+[2025-09-06 02:21:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:21:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:22:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:22:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:22:32] [Rank 0] Total Loss: 4.6950
+[2025-09-06 02:22:32] [Rank 0] Total FTA (Unweighted): 0.2944
+[2025-09-06 02:22:32] [Rank 0] Total FTA (Weighted): 0.2944
+[2025-09-06 02:22:32] [Rank 0] Group 0 Loss: 3.3832
+[2025-09-06 02:22:32] [Rank 0] Group 1 Loss: 3.2981
+[2025-09-06 02:22:32] [Rank 0] Group 2 Loss: 3.2868
+[2025-09-06 02:22:32] [Rank 0] Group 3 Loss: 3.7607
+[2025-09-06 02:22:32] [Rank 0] Group 4 Loss: 4.0444
+[2025-09-06 02:22:32] [Rank 0] Group 5 Loss: 4.5126
+[2025-09-06 02:22:32] [Rank 0] Group 6 Loss: 4.7766
+[2025-09-06 02:22:32] [Rank 0] Group 7 Loss: 4.9317
+[2025-09-06 02:22:32] [Rank 0] Group 8 Loss: 5.2554
+[2025-09-06 02:22:32] [Rank 0] Group 9 Loss: 5.3880
+[2025-09-06 02:22:33] [Rank 0] Group 10 Loss: 5.4581
+[2025-09-06 02:22:33] [Rank 0] Group 11 Loss: 5.4765
+[2025-09-06 02:22:33] [Rank 0] Group 12 Loss: 5.3736
+[2025-09-06 02:22:33] [Rank 0] Group 13 Loss: 5.3717
+[2025-09-06 02:22:33] [Rank 0] Group 14 Loss: 5.4235
+[2025-09-06 02:22:33] [Rank 0] Group 15 Loss: 5.3789
+[2025-09-06 02:22:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:22:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:22:33] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 02:22:33] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:22:33] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 02:22:33] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-06 02:22:33] [Rank 0] Group 6 FTA: 0.2500
+[2025-09-06 02:22:33] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 02:22:33] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-06 02:22:33] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:22:33] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-06 02:22:33] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-06 02:22:33] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-06 02:22:33] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-06 02:22:33] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-06 02:22:33] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 02:22:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:22:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:22:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:22:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:22:35] [Rank 0] step:9001/10000 train_time:370328ms step_avg:41.14ms
+[2025-09-06 02:22:35] [Rank 0] step:9021/10000 train_time:371002ms step_avg:41.13ms
+[2025-09-06 02:22:36] [Rank 0] step:9041/10000 train_time:371740ms step_avg:41.12ms
+[2025-09-06 02:22:37] [Rank 0] step:9061/10000 train_time:372478ms step_avg:41.11ms
+[2025-09-06 02:22:38] [Rank 0] step:9081/10000 train_time:373216ms step_avg:41.10ms
+[2025-09-06 02:22:38] [Rank 0] step:9101/10000 train_time:373954ms step_avg:41.09ms
+[2025-09-06 02:22:39] [Rank 0] step:9121/10000 train_time:374693ms step_avg:41.08ms
+[2025-09-06 02:22:40] [Rank 0] step:9141/10000 train_time:375431ms step_avg:41.07ms
+[2025-09-06 02:22:41] [Rank 0] step:9161/10000 train_time:376170ms step_avg:41.06ms
+[2025-09-06 02:22:41] [Rank 0] step:9181/10000 train_time:376908ms step_avg:41.05ms
+[2025-09-06 02:22:42] [Rank 0] step:9201/10000 train_time:377645ms step_avg:41.04ms
+[2025-09-06 02:22:43] [Rank 0] step:9221/10000 train_time:378384ms step_avg:41.04ms
+[2025-09-06 02:22:44] [Rank 0] step:9241/10000 train_time:379122ms step_avg:41.03ms
+[2025-09-06 02:22:44] [Rank 0] step:9261/10000 train_time:379860ms step_avg:41.02ms
+[2025-09-06 02:22:45] [Rank 0] step:9281/10000 train_time:380599ms step_avg:41.01ms
+[2025-09-06 02:22:46] [Rank 0] step:9301/10000 train_time:381337ms step_avg:41.00ms
+[2025-09-06 02:22:47] [Rank 0] step:9321/10000 train_time:382075ms step_avg:40.99ms
+[2025-09-06 02:22:47] [Rank 0] step:9341/10000 train_time:382813ms step_avg:40.98ms
+[2025-09-06 02:22:48] [Rank 0] step:9361/10000 train_time:383552ms step_avg:40.97ms
+[2025-09-06 02:22:49] [Rank 0] step:9381/10000 train_time:384290ms step_avg:40.96ms
+[2025-09-06 02:22:50] [Rank 0] step:9401/10000 train_time:385029ms step_avg:40.96ms
+[2025-09-06 02:22:50] [Rank 0] step:9421/10000 train_time:385767ms step_avg:40.95ms
+[2025-09-06 02:22:51] [Rank 0] step:9441/10000 train_time:386506ms step_avg:40.94ms
+[2025-09-06 02:22:52] [Rank 0] step:9461/10000 train_time:387244ms step_avg:40.93ms
+[2025-09-06 02:22:52] [Rank 0] step:9481/10000 train_time:387982ms step_avg:40.92ms
+[2025-09-06 02:22:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
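For downstream analysis it is convenient to recover the loss trajectory from these training_log_*.txt files programmatically. A small hypothetical helper (not part of the repository) that pulls the periodic step/train_loss/val_loss triples out of a log in this format:

    import re

    # Matches lines like: "step:9000/10000 train_loss:2.1844 val_loss:2.1710 ..."
    STEP_RE = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

    def read_val_losses(path):
        """Collect (step, train_loss, val_loss) tuples from a training log."""
        out = []
        with open(path) as f:
            for line in f:
                m = STEP_RE.search(line)
                if m:
                    out.append((int(m.group(1)), float(m.group(2)), float(m.group(3))))
        return out

For the run logged above, this would include entries such as (9000, 2.1844, 2.1710).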
+[2025-09-06 02:22:54] [Rank 0] PRINT: step:9500/10000 train_loss:2.1733 val_loss:2.1614 train_time:388801ms step_avg:40.93ms
+[2025-09-06 02:22:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:22:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:24:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:24:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:24:14] [Rank 0] Total Loss: 4.6661
+[2025-09-06 02:24:14] [Rank 0] Total FTA (Unweighted): 0.2975
+[2025-09-06 02:24:14] [Rank 0] Total FTA (Weighted): 0.2975
+[2025-09-06 02:24:14] [Rank 0] Group 0 Loss: 3.3688
+[2025-09-06 02:24:14] [Rank 0] Group 1 Loss: 3.2880
+[2025-09-06 02:24:14] [Rank 0] Group 2 Loss: 3.2349
+[2025-09-06 02:24:14] [Rank 0] Group 3 Loss: 3.7515
+[2025-09-06 02:24:14] [Rank 0] Group 4 Loss: 4.0361
+[2025-09-06 02:24:15] [Rank 0] Group 5 Loss: 4.4746
+[2025-09-06 02:24:15] [Rank 0] Group 6 Loss: 4.7518
+[2025-09-06 02:24:15] [Rank 0] Group 7 Loss: 4.9037
+[2025-09-06 02:24:15] [Rank 0] Group 8 Loss: 5.2307
+[2025-09-06 02:24:15] [Rank 0] Group 9 Loss: 5.3450
+[2025-09-06 02:24:15] [Rank 0] Group 10 Loss: 5.4358
+[2025-09-06 02:24:15] [Rank 0] Group 11 Loss: 5.4369
+[2025-09-06 02:24:15] [Rank 0] Group 12 Loss: 5.3311
+[2025-09-06 02:24:15] [Rank 0] Group 13 Loss: 5.3424
+[2025-09-06 02:24:15] [Rank 0] Group 14 Loss: 5.3922
+[2025-09-06 02:24:15] [Rank 0] Group 15 Loss: 5.3334
+[2025-09-06 02:24:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:24:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:24:15] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 02:24:15] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:24:15] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 02:24:15] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-06 02:24:15] [Rank 0] Group 6 FTA: 0.2600
+[2025-09-06 02:24:15] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-06 02:24:15] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 02:24:15] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:24:15] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 02:24:15] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-06 02:24:15] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-06 02:24:15] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-06 02:24:15] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-06 02:24:15] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-06 02:24:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:24:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:24:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:24:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:24:17] [Rank 0] step:9501/10000 train_time:388810ms step_avg:40.92ms
+[2025-09-06 02:24:17] [Rank 0] step:9521/10000 train_time:389480ms step_avg:40.91ms
+[2025-09-06 02:24:18] [Rank 0] step:9541/10000 train_time:390217ms step_avg:40.90ms
+[2025-09-06 02:24:19] [Rank 0] step:9561/10000 train_time:390954ms step_avg:40.89ms
+[2025-09-06 02:24:20] [Rank 0] step:9581/10000 train_time:391815ms step_avg:40.90ms
+[2025-09-06 02:24:20] [Rank 0] step:9601/10000 train_time:392552ms step_avg:40.89ms
+[2025-09-06 02:24:21] [Rank 0] step:9621/10000 train_time:393291ms step_avg:40.88ms
+[2025-09-06 02:24:22] [Rank 0] step:9641/10000 train_time:394169ms step_avg:40.88ms
+[2025-09-06 02:24:23] [Rank 0] step:9661/10000 train_time:394978ms step_avg:40.88ms
+[2025-09-06 02:24:24] [Rank 0] step:9681/10000 train_time:395716ms step_avg:40.88ms
+[2025-09-06 02:24:24] [Rank 0] step:9701/10000 train_time:396455ms step_avg:40.87ms
+[2025-09-06 02:24:25] [Rank 0] step:9721/10000 train_time:397193ms step_avg:40.86ms
+[2025-09-06 02:24:26] [Rank 0] step:9741/10000 train_time:397931ms step_avg:40.85ms
+[2025-09-06 02:24:27] [Rank 0] step:9761/10000 train_time:398670ms step_avg:40.84ms
+[2025-09-06 02:24:27] [Rank 0] step:9781/10000 train_time:399408ms step_avg:40.84ms
+[2025-09-06 02:24:28] [Rank 0] step:9801/10000 train_time:400147ms step_avg:40.83ms
+[2025-09-06 02:24:29] [Rank 0] step:9821/10000 train_time:400885ms step_avg:40.82ms
+[2025-09-06 02:24:30] [Rank 0] step:9841/10000 train_time:401622ms step_avg:40.81ms
+[2025-09-06 02:24:30] [Rank 0] step:9861/10000 train_time:402360ms step_avg:40.80ms
+[2025-09-06 02:24:31] [Rank 0] step:9881/10000 train_time:403098ms step_avg:40.80ms
+[2025-09-06 02:24:32] [Rank 0] step:9901/10000 train_time:403839ms step_avg:40.79ms
+[2025-09-06 02:24:33] [Rank 0] step:9921/10000 train_time:404577ms step_avg:40.78ms
+[2025-09-06 02:24:33] [Rank 0] step:9941/10000 train_time:405315ms step_avg:40.77ms
+[2025-09-06 02:24:34] [Rank 0] step:9961/10000 train_time:406053ms step_avg:40.76ms
+[2025-09-06 02:24:35] [Rank 0] step:9981/10000 train_time:406791ms step_avg:40.76ms
+[2025-09-06 02:24:35] [Rank 0] step:10000/10000 train_time:407493ms step_avg:40.75ms
+[2025-09-06 02:24:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
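The step_avg field carries no independent information: it is simply the cumulative train_time divided by the step index, which the step lines above confirm:

    # step_avg = train_time / step, rounded to 0.01 ms
    assert round(407493 / 10000, 2) == 40.75   # step:10000 above
    assert round(331948 / 8001, 2) == 41.49    # step:8001 after the step-8000 eval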
+[2025-09-06 02:24:36] [Rank 0] PRINT: step:10000/10000 train_loss:2.1648 val_loss:2.1528 train_time:407617ms step_avg:40.76ms
+[2025-09-06 02:24:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-06 02:24:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-06 02:25:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-06 02:25:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-06 02:25:57] [Rank 0] Total Loss: 4.6745
+[2025-09-06 02:25:57] [Rank 0] Total FTA (Unweighted): 0.3019
+[2025-09-06 02:25:57] [Rank 0] Total FTA (Weighted): 0.3019
+[2025-09-06 02:25:57] [Rank 0] Group 0 Loss: 3.3596
+[2025-09-06 02:25:57] [Rank 0] Group 1 Loss: 3.2956
+[2025-09-06 02:25:57] [Rank 0] Group 2 Loss: 3.2735
+[2025-09-06 02:25:57] [Rank 0] Group 3 Loss: 3.7704
+[2025-09-06 02:25:57] [Rank 0] Group 4 Loss: 4.0385
+[2025-09-06 02:25:57] [Rank 0] Group 5 Loss: 4.4762
+[2025-09-06 02:25:57] [Rank 0] Group 6 Loss: 4.7553
+[2025-09-06 02:25:57] [Rank 0] Group 7 Loss: 4.9065
+[2025-09-06 02:25:57] [Rank 0] Group 8 Loss: 5.2377
+[2025-09-06 02:25:57] [Rank 0] Group 9 Loss: 5.3523
+[2025-09-06 02:25:57] [Rank 0] Group 10 Loss: 5.4448
+[2025-09-06 02:25:57] [Rank 0] Group 11 Loss: 5.4481
+[2025-09-06 02:25:57] [Rank 0] Group 12 Loss: 5.3416
+[2025-09-06 02:25:57] [Rank 0] Group 13 Loss: 5.3500
+[2025-09-06 02:25:57] [Rank 0] Group 14 Loss: 5.4039
+[2025-09-06 02:25:57] [Rank 0] Group 15 Loss: 5.3383
+[2025-09-06 02:25:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-06 02:25:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-06 02:25:57] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-06 02:25:57] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-06 02:25:57] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-06 02:25:57] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-06 02:25:57] [Rank 0] Group 6 FTA: 0.2800
+[2025-09-06 02:25:57] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-06 02:25:57] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-06 02:25:57] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-06 02:25:57] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-06 02:25:57] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-06 02:25:57] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-06 02:25:57] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-06 02:25:57] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-06 02:25:57] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-06 02:25:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_loss_curves.png
+[2025-09-06 02:25:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/per_class_acc_curves.png
+[2025-09-06 02:25:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_loss_curve.png
+[2025-09-06 02:25:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.08_seed_46/total_acc_curve.png
+[2025-09-06 02:25:58] [Rank 0] step:10001/10000 train_time:407625ms step_avg:40.76ms
+[2025-09-06 02:25:58] [Rank 0] PRINT: --- Training Finished: Sat Sep 6 02:25:58 2025 ---
+[2025-09-06 02:25:58] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..5a05de9e5ba26b4a3403b9fc67a76c54b6216c57 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, +
"seed": 42, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.1, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "f411d0a0-8dec-4ae9-9d7f-6511e3839f39", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 
783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 
831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 
384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 
906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..32e891bb84b0d1ab7651caada22ce31b2b2e659c --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d6951407d1f6ebb8771559ade250cbd656a2e035b70699f7575dbafedb60adf +size 313011 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..a01677901650d90b7a0c102ebf87475235f64a5f --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cb4faec5db746e094684c7296277ae14e461e55a1ebd8c128990f0c09130354 +size 394374 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..8215a305966a171c475c3aaa91cc07ab9f05aa64 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62f5f2338c4ae904660a78d115338a202b746810cb0a12510c9aa4ff2e96836c +size 91409 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..36f830d6721f7c392a799d59b0e5a49bfc872c43 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6c1bdae9180bb4c93a654058374850e8ed4b228a0b4fe26921ed3fcb673bbf1 +size 113580 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/training_log_f411d0a0-8dec-4ae9-9d7f-6511e3839f39.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/training_log_f411d0a0-8dec-4ae9-9d7f-6511e3839f39.txt new file mode 100644 index 0000000000000000000000000000000000000000..9b727124349da5893a14464cd7f1b2867762276c --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/training_log_f411d0a0-8dec-4ae9-9d7f-6511e3839f39.txt @@ -0,0 +1,5614 @@ +[2025-09-05 20:26:44] [Rank 0] PRINT: --- Script Start: Fri Sep 5 20:26:44 2025 --- +[2025-09-05 20:26:44] [Rank 0] PRINT: --- Script Start: Fri Sep 5 20:26:44 2025 --- +[2025-09-05 20:26:44] [Rank 0] PRINT: Parsed CLI 
args: Namespace(unet=False, seed=42, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 20:26:44] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 20:26:44] [Rank 0] PRINT: Using fixed seed: 42
+[2025-09-05 20:26:44] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42
+[2025-09-05 20:26:44] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
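+                         "9: SGD+Momentum on ALL parameters (no Muon/Adam; uses --sgd_lr). "
+                         "10: Muon(O Attn, MLP)/Adam(QKV Attn). "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QKV Attn, W_1 MLP). "
+                         "14: Muon(O Attn)/Adam(QKV Attn, MLP). "
+                         "15: Muon(V Attn)/Adam(QK Attn, O Attn, MLP). "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."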
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Write to the log file exactly once, guarded by the logfile check.
+        # (An earlier revision wrote a second, unconditional copy here, which is
+        # why every early line in this log appears duplicated.)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
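+# Worked example for the sampling scheme used below (illustrative, not part of the
+# original run): generate_powerlaw_selection_counts(m) builds, for each group g in
+# 0..m, 2**(g-1) classes (a single class for g=0) with 2**(m-g) samples per class:
+#
+#   counts, groups = generate_powerlaw_selection_counts(3)
+#   # counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#   # groups == [0, 1, 2, 2, 3, 3, 3, 3]
+#
+# Class frequencies thus fall off as a power law, while each group g >= 1
+# contributes the same total of 2**(m-1) samples (group 0 contributes 2**m).
+# ... 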
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QKV Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_O Attn, W_2 MLP. Adam on QKV Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
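+    # The remaining modes isolate single projections. Besides the groups listed
+    # per mode, embeddings, the LM head and all scalar parameters always stay with
+    # Adam (except in mode 9, which is pure SGD):
+    #   mode 14: Muon {W_O}       / Adam {Q, K, V, MLP}
+    #   mode 15: Muon {W_V}       / Adam {Q, K, W_O, MLP}
+    #   mode 16: Muon {Q, K, V}   / Adam {W_O, MLP}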
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam regularization is needed
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QKV Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_O Attn, W_2 MLP. Adam on QKV Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
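+    # Gated-MLP note: in this parameterization W_1 spans both input projections of
+    # the gated MLP (c_fc and c_up) while W_2 is only the output projection
+    # (c_proj); that is the one structural difference from the qkvo branch above.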
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam regularization is needed
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # Use exp_args.muon_lr directly: unlike the qkvo branch, this branch
+                # never bound a local muon_lr, which would raise a NameError here.
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 20:26:44] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
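+        # Note: messages prefixed with "PRINT:" are echoed to stdout (prefix
+        # stripped) even when console=False; each message is appended to the
+        # logfile exactly once below.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message 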
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
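+    # NOTE (worked example): padding rounds each sample up to a multiple of
+    # BLOCK_SIZE=128 and caps it at 4096, so a 200-token QA item is padded to
+    # 256 tokens and evaluated with 256 // 128 = 2 window blocks.
+    # NOTE (worked example): for the two totals computed below, two groups
+    # scoring 90/100 and 1/10 give weighted FTA 91/110 ≈ 0.827 but unweighted
+    # FTA (0.9 + 0.1) / 2 = 0.5.
+    # 4. Aggregate results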
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+
+    # Two methods for calculating total accuracy
+    total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples
+    total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy
+        'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups
+        'total_acc': total_acc_unweighted # Primarily use simple average method
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14) # history is keyed by training step, not epoch
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
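+# Note: evaluate_per_class_loss below appears to be an earlier helper that is
+# superseded by run_detailed_evaluation; it is not called from the training
+# loop in this logged script.
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 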
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
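+    # Note: each mode above and below only repartitions the 2-D weight matrices
+    # between Muon and Adam (or hands everything to SGD in mode 9); embeddings,
+    # the lm_head and scalar parameters always stay with Adam (see group setup below).
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)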
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
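+    # Note: the id()-based flattening above de-duplicates parameters, so a
+    # matrix named in two logical groups is handed to Muon only once.
+    print0(f"PRINT: Optimizers configured. 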
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
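+    # Note: in this gated parameterization each block's MLP has three matrices
+    # (c_fc, c_up, c_proj); mlp_w1_group covers both c_fc and c_up, so the
+    # "W_1 MLP" modes (6, 8, 13) assign two matrices per block to Adam here.
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)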
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # Read exp_args.muon_lr directly: the local muon_lr is only bound in the qkvo branch above
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
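+    # Note: the training loop below ramps the Muon momentum from 0.85 to 0.95
+    # over the first 300 steps by mutating optimizer2.param_groups.
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)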
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the base 'model'; 'model_compiled' is only defined by this call
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
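+    # Note: warmup only exercises the compiled kernels and autotuning; model
+    # and optimizer state are restored from the snapshot taken above, so these
+    # 10 steps do not advance training.
+    for param in 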
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # store the unweighted (simple) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
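plot_curves itself is defined earlier in the script and not shown in this excerpt. A minimal sketch consistent with the four calls above, which pass both flat {step: value} dicts and nested {group_id: {step: value}} dicts, assuming matplotlib (all names here are illustrative):

    import matplotlib
    matplotlib.use("Agg")  # write PNGs without a display
    import matplotlib.pyplot as plt

    def plot_curves_sketch(data, out_path, title, ylabel, y_lim=None):
        if not data:
            return
        # Accept both shapes used above: flat totals and per-group dicts.
        nested = isinstance(next(iter(data.values())), dict)
        series = data if nested else {"total": data}
        plt.figure()
        for name, points in sorted(series.items(), key=lambda kv: str(kv[0])):
            steps = sorted(points, key=int)
            plt.plot([int(s) for s in steps], [points[s] for s in steps],
                     label=f"group {name}" if nested else name)
        plt.xlabel("step")
        plt.ylabel(ylabel)
        plt.title(title)
        if y_lim:
            plt.ylim(y_lim)
        plt.legend()
        plt.savefig(out_path)
        plt.close()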
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
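get_lr is also defined above this excerpt. Given cooldown_frac=0.8 in the logged hyperparameters, a plausible shape, offered only as an assumption, is the usual flat-then-linear-cooldown multiplier applied to each group's initial_lr:

    def get_lr_sketch(step, num_iterations=10000, cooldown_frac=0.8):
        # Flat at 1.0 for the first (1 - cooldown_frac) of training,
        # then linear decay to 0 over the final cooldown_frac.
        x = step / num_iterations
        if x < 1 - cooldown_frac:
            return 1.0
        return (1 - x) / cooldown_frac

    assert get_lr_sketch(0) == 1.0 and get_lr_sketch(2000) == 1.0
    assert abs(get_lr_sketch(10000)) < 1e-9

The momentum ramp for optimizer2, by contrast, is fully explicit in the loop above: it moves linearly from 0.85 to 0.95 over the first 300 steps.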
+[2025-09-05 20:26:44] [Rank 0] PRINT: Constructing model...
+[2025-09-05 20:26:46] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 20:26:46] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 20:26:46] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 20:26:50] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 20:26:50] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 20:26:50] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 20:26:50] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 20:26:50] [Rank 0] PRINT: Model returns:
+[2025-09-05 20:26:50] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 20:26:50] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 20:26:50] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.1).
+[2025-09-05 20:26:50] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 20:26:50] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 20:26:55] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 20:26:55] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 20:27:34] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 20:27:34] [Rank 0] PRINT: Starting training...
+[2025-09-05 20:27:40] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/fixed_eval_indices.json
+[2025-09-05 20:27:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
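In the step lines that follow, step_avg is the cumulative train_time divided by the printed step count, so the first segment absorbs compilation and warmup overhead and the average decays from there. For example, taking the first logged step below:

    train_time_ms, printed_step = 32430, 21   # from the "step:21/10000" line below
    print(f"{train_time_ms / printed_step:.2f}ms")
    # -> 1544.29ms; the logged 1544.31ms differs only because train_time is
    #    printed rounded to whole milliseconds while step_avg uses the raw timer.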
+[2025-09-05 20:27:44] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 20:28:16] [Rank 0] step:21/10000 train_time:32430ms step_avg:1544.31ms
+[2025-09-05 20:28:17] [Rank 0] step:41/10000 train_time:33159ms step_avg:808.76ms
+[2025-09-05 20:28:18] [Rank 0] step:61/10000 train_time:33887ms step_avg:555.52ms
+[2025-09-05 20:28:18] [Rank 0] step:81/10000 train_time:34616ms step_avg:427.35ms
+[2025-09-05 20:28:19] [Rank 0] step:101/10000 train_time:35343ms step_avg:349.93ms
+[2025-09-05 20:28:20] [Rank 0] step:121/10000 train_time:36070ms step_avg:298.10ms
+[2025-09-05 20:28:21] [Rank 0] step:141/10000 train_time:36797ms step_avg:260.97ms
+[2025-09-05 20:28:21] [Rank 0] step:161/10000 train_time:37524ms step_avg:233.07ms
+[2025-09-05 20:28:22] [Rank 0] step:181/10000 train_time:38252ms step_avg:211.33ms
+[2025-09-05 20:28:23] [Rank 0] step:201/10000 train_time:38978ms step_avg:193.92ms
+[2025-09-05 20:28:24] [Rank 0] step:221/10000 train_time:39705ms step_avg:179.66ms
+[2025-09-05 20:28:24] [Rank 0] step:241/10000 train_time:40432ms step_avg:167.77ms
+[2025-09-05 20:28:25] [Rank 0] step:261/10000 train_time:41160ms step_avg:157.70ms
+[2025-09-05 20:28:26] [Rank 0] step:281/10000 train_time:41887ms step_avg:149.07ms
+[2025-09-05 20:28:26] [Rank 0] step:301/10000 train_time:42615ms step_avg:141.58ms
+[2025-09-05 20:28:27] [Rank 0] step:321/10000 train_time:43342ms step_avg:135.02ms
+[2025-09-05 20:28:28] [Rank 0] step:341/10000 train_time:44070ms step_avg:129.24ms
+[2025-09-05 20:28:29] [Rank 0] step:361/10000 train_time:44798ms step_avg:124.09ms
+[2025-09-05 20:28:29] [Rank 0] step:381/10000 train_time:45525ms step_avg:119.49ms
+[2025-09-05 20:28:30] [Rank 0] step:401/10000 train_time:46252ms step_avg:115.34ms
+[2025-09-05 20:28:31] [Rank 0] step:421/10000 train_time:46978ms step_avg:111.59ms
+[2025-09-05 20:28:32] [Rank 0] step:441/10000 train_time:47705ms step_avg:108.18ms
+[2025-09-05 20:28:32] [Rank 0] step:461/10000 train_time:48433ms step_avg:105.06ms
+[2025-09-05 20:28:33] [Rank 0] step:481/10000 train_time:49160ms step_avg:102.20ms
+[2025-09-05 20:28:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:28:34] [Rank 0] PRINT: step:500/10000 train_loss:5.6288 val_loss:4.1185 train_time:49966ms step_avg:99.93ms
+[2025-09-05 20:28:34] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:28:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:29:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:29:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:29:55] [Rank 0] Total Loss: 6.0658
+[2025-09-05 20:29:55] [Rank 0] Total FTA (Unweighted): 0.0819
+[2025-09-05 20:29:55] [Rank 0] Total FTA (Weighted): 0.0819
+[2025-09-05 20:29:55] [Rank 0] Group 0 Loss: 3.7767
+[2025-09-05 20:29:55] [Rank 0] Group 1 Loss: 3.7924
+[2025-09-05 20:29:55] [Rank 0] Group 2 Loss: 4.6332
+[2025-09-05 20:29:55] [Rank 0] Group 3 Loss: 5.4904
+[2025-09-05 20:29:55] [Rank 0] Group 4 Loss: 6.2468
+[2025-09-05 20:29:55] [Rank 0] Group 5 Loss: 6.4232
+[2025-09-05 20:29:55] [Rank 0] Group 6 Loss: 6.5178
+[2025-09-05 20:29:55] [Rank 0] Group 7 Loss: 6.4943
+[2025-09-05 20:29:55] [Rank 0] Group 8 Loss: 6.6415
+[2025-09-05 20:29:55] [Rank 0] Group 9 Loss: 6.7870
+[2025-09-05 20:29:55] [Rank 0] Group 10 Loss: 6.7608
+[2025-09-05 20:29:55] [Rank 0] Group 11 Loss: 6.8222
+[2025-09-05 20:29:55] [Rank 0] Group 12 Loss: 6.6456
+[2025-09-05 20:29:55] [Rank 0] Group 13 Loss: 6.6326
+[2025-09-05 20:29:55] [Rank 0] Group 14 Loss: 6.7484
+[2025-09-05 20:29:55] [Rank 0] Group 15 Loss: 6.6396
+[2025-09-05 20:29:55] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 20:29:55] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 20:29:55] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-05 20:29:55] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-05 20:29:55] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-05 20:29:55] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-05 20:29:55] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 20:29:55] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-05 20:29:55] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-05 20:29:55] [Rank 0] Group 9 FTA: 0.0800
+[2025-09-05 20:29:55] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-05 20:29:55] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 20:29:55] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 20:29:55] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 20:29:55] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 20:29:55] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 20:29:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:29:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:29:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:29:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:29:57] [Rank 0] step:501/10000 train_time:49976ms step_avg:99.75ms
+[2025-09-05 20:29:58] [Rank 0] step:521/10000 train_time:50649ms step_avg:97.21ms
+[2025-09-05 20:29:59] [Rank 0] step:541/10000 train_time:51375ms step_avg:94.96ms
+[2025-09-05 20:29:59] [Rank 0] step:561/10000 train_time:52103ms step_avg:92.87ms
+[2025-09-05 20:30:00] [Rank 0] step:581/10000 train_time:52830ms step_avg:90.93ms
+[2025-09-05 20:30:01] [Rank 0] step:601/10000 train_time:53557ms step_avg:89.11ms
+[2025-09-05 20:30:02] [Rank 0] step:621/10000 train_time:54284ms step_avg:87.41ms
+[2025-09-05 20:30:02] [Rank 0] step:641/10000 train_time:55012ms step_avg:85.82ms
+[2025-09-05 20:30:03] [Rank 0] step:661/10000 train_time:55738ms step_avg:84.32ms
+[2025-09-05 20:30:04] [Rank 0] step:681/10000 train_time:56464ms step_avg:82.91ms
+[2025-09-05 20:30:04] [Rank 0] step:701/10000 train_time:57191ms step_avg:81.58ms
+[2025-09-05 20:30:05] [Rank 0] step:721/10000 train_time:57917ms step_avg:80.33ms
+[2025-09-05 20:30:06] [Rank 0] step:741/10000 train_time:58644ms step_avg:79.14ms
+[2025-09-05 20:30:07] [Rank 0] step:761/10000 train_time:59375ms step_avg:78.02ms
+[2025-09-05 20:30:07] [Rank 0] step:781/10000 train_time:60107ms step_avg:76.96ms
+[2025-09-05 20:30:08] [Rank 0] step:801/10000 train_time:60838ms step_avg:75.95ms
+[2025-09-05 20:30:09] [Rank 0] step:821/10000 train_time:62192ms step_avg:75.75ms
+[2025-09-05 20:30:10] [Rank 0] step:841/10000 train_time:63082ms step_avg:75.01ms
+[2025-09-05 20:30:11] [Rank 0] step:861/10000 train_time:63813ms step_avg:74.12ms
+[2025-09-05 20:30:12] [Rank 0] step:881/10000 train_time:64545ms step_avg:73.26ms
+[2025-09-05 20:30:13] [Rank 0] step:901/10000 train_time:65435ms step_avg:72.62ms
+[2025-09-05 20:30:13] [Rank 0] step:921/10000 train_time:66167ms step_avg:71.84ms
+[2025-09-05 20:30:14] [Rank 0] step:941/10000 train_time:66898ms step_avg:71.09ms
+[2025-09-05 20:30:15] [Rank 0] step:961/10000 train_time:67629ms step_avg:70.37ms
+[2025-09-05 20:30:16] [Rank 0] step:981/10000 train_time:68368ms step_avg:69.69ms
+[2025-09-05 20:30:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:30:17] [Rank 0] PRINT: step:1000/10000 train_loss:3.6827 val_loss:3.3357 train_time:69181ms step_avg:69.18ms
+[2025-09-05 20:30:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:30:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:31:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:31:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:31:37] [Rank 0] Total Loss: 5.4771
+[2025-09-05 20:31:37] [Rank 0] Total FTA (Unweighted): 0.1256
+[2025-09-05 20:31:37] [Rank 0] Total FTA (Weighted): 0.1256
+[2025-09-05 20:31:37] [Rank 0] Group 0 Loss: 3.3433
+[2025-09-05 20:31:37] [Rank 0] Group 1 Loss: 3.4034
+[2025-09-05 20:31:37] [Rank 0] Group 2 Loss: 3.7046
+[2025-09-05 20:31:37] [Rank 0] Group 3 Loss: 4.4406
+[2025-09-05 20:31:37] [Rank 0] Group 4 Loss: 5.3253
+[2025-09-05 20:31:37] [Rank 0] Group 5 Loss: 5.6792
+[2025-09-05 20:31:37] [Rank 0] Group 6 Loss: 5.9288
+[2025-09-05 20:31:37] [Rank 0] Group 7 Loss: 5.9229
+[2025-09-05 20:31:37] [Rank 0] Group 8 Loss: 6.1419
+[2025-09-05 20:31:37] [Rank 0] Group 9 Loss: 6.3136
+[2025-09-05 20:31:37] [Rank 0] Group 10 Loss: 6.2941
+[2025-09-05 20:31:37] [Rank 0] Group 11 Loss: 6.3580
+[2025-09-05 20:31:37] [Rank 0] Group 12 Loss: 6.1700
+[2025-09-05 20:31:37] [Rank 0] Group 13 Loss: 6.1637
+[2025-09-05 20:31:37] [Rank 0] Group 14 Loss: 6.2699
+[2025-09-05 20:31:37] [Rank 0] Group 15 Loss: 6.1733
+[2025-09-05 20:31:37] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 20:31:37] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 20:31:37] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 20:31:37] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 20:31:37] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 20:31:37] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-05 20:31:37] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-05 20:31:37] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 20:31:37] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 20:31:37] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-05 20:31:37] [Rank 0] Group 10 FTA: 0.0900
+[2025-09-05 20:31:37] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 20:31:37] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 20:31:37] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 20:31:37] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 20:31:37] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 20:31:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:31:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:31:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:31:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:31:39] [Rank 0] step:1001/10000 train_time:69190ms step_avg:69.12ms
+[2025-09-05 20:31:40] [Rank 0] step:1021/10000 train_time:69859ms step_avg:68.42ms
+[2025-09-05 20:31:41] [Rank 0] step:1041/10000 train_time:70591ms step_avg:67.81ms
+[2025-09-05 20:31:41] [Rank 0] step:1061/10000 train_time:71324ms step_avg:67.22ms
+[2025-09-05 20:31:42] [Rank 0] step:1081/10000 train_time:72056ms step_avg:66.66ms
+[2025-09-05 20:31:43] [Rank 0] step:1101/10000 train_time:72789ms step_avg:66.11ms
+[2025-09-05 20:31:44] [Rank 0] step:1121/10000 train_time:73521ms step_avg:65.59ms
+[2025-09-05 20:31:44] [Rank 0] step:1141/10000 train_time:74254ms step_avg:65.08ms
+[2025-09-05 20:31:45] [Rank 0] step:1161/10000 train_time:74986ms step_avg:64.59ms
+[2025-09-05 20:31:46] [Rank 0] step:1181/10000 train_time:75719ms step_avg:64.11ms
+[2025-09-05 20:31:46] [Rank 0] step:1201/10000 train_time:76452ms step_avg:63.66ms
+[2025-09-05 20:31:47] [Rank 0] step:1221/10000 train_time:77184ms step_avg:63.21ms
+[2025-09-05 20:31:48] [Rank 0] step:1241/10000 train_time:77917ms step_avg:62.79ms
+[2025-09-05 20:31:49] [Rank 0] step:1261/10000 train_time:78649ms step_avg:62.37ms
+[2025-09-05 20:31:49] [Rank 0] step:1281/10000 train_time:79382ms step_avg:61.97ms
+[2025-09-05 20:31:50] [Rank 0] step:1301/10000 train_time:80115ms step_avg:61.58ms
+[2025-09-05 20:31:51] [Rank 0] step:1321/10000 train_time:80848ms step_avg:61.20ms
+[2025-09-05 20:31:52] [Rank 0] step:1341/10000 train_time:81578ms step_avg:60.83ms
+[2025-09-05 20:31:52] [Rank 0] step:1361/10000 train_time:82311ms step_avg:60.48ms
+[2025-09-05 20:31:53] [Rank 0] step:1381/10000 train_time:83042ms step_avg:60.13ms
+[2025-09-05 20:31:54] [Rank 0] step:1401/10000 train_time:83774ms step_avg:59.80ms
+[2025-09-05 20:31:55] [Rank 0] step:1421/10000 train_time:84507ms step_avg:59.47ms
+[2025-09-05 20:31:55] [Rank 0] step:1441/10000 train_time:85239ms step_avg:59.15ms
+[2025-09-05 20:31:56] [Rank 0] step:1461/10000 train_time:85972ms step_avg:58.84ms
+[2025-09-05 20:31:57] [Rank 0] step:1481/10000 train_time:86705ms step_avg:58.54ms
+[2025-09-05 20:31:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:31:58] [Rank 0] PRINT: step:1500/10000 train_loss:3.1481 val_loss:2.9730 train_time:87517ms step_avg:58.34ms
+[2025-09-05 20:31:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:31:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:33:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:33:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:33:19] [Rank 0] Total Loss: 5.2616
+[2025-09-05 20:33:19] [Rank 0] Total FTA (Unweighted): 0.1756
+[2025-09-05 20:33:19] [Rank 0] Total FTA (Weighted): 0.1756
+[2025-09-05 20:33:19] [Rank 0] Group 0 Loss: 3.3783
+[2025-09-05 20:33:19] [Rank 0] Group 1 Loss: 3.2749
+[2025-09-05 20:33:19] [Rank 0] Group 2 Loss: 3.5484
+[2025-09-05 20:33:19] [Rank 0] Group 3 Loss: 4.0921
+[2025-09-05 20:33:19] [Rank 0] Group 4 Loss: 4.9178
+[2025-09-05 20:33:19] [Rank 0] Group 5 Loss: 5.3407
+[2025-09-05 20:33:19] [Rank 0] Group 6 Loss: 5.6024
+[2025-09-05 20:33:19] [Rank 0] Group 7 Loss: 5.6928
+[2025-09-05 20:33:19] [Rank 0] Group 8 Loss: 5.9497
+[2025-09-05 20:33:19] [Rank 0] Group 9 Loss: 6.0832
+[2025-09-05 20:33:19] [Rank 0] Group 10 Loss: 6.0743
+[2025-09-05 20:33:19] [Rank 0] Group 11 Loss: 6.1234
+[2025-09-05 20:33:19] [Rank 0] Group 12 Loss: 6.0020
+[2025-09-05 20:33:19] [Rank 0] Group 13 Loss: 6.0061
+[2025-09-05 20:33:19] [Rank 0] Group 14 Loss: 6.0906
+[2025-09-05 20:33:19] [Rank 0] Group 15 Loss: 6.0091
+[2025-09-05 20:33:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:33:19] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 20:33:19] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 20:33:19] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 20:33:19] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 20:33:19] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 20:33:19] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 20:33:19] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 20:33:19] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-05 20:33:19] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 20:33:19] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 20:33:19] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 20:33:19] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 20:33:19] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 20:33:19] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 20:33:19] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:33:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:33:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:33:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:33:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:33:20] [Rank 0] step:1501/10000 train_time:87527ms step_avg:58.31ms
+[2025-09-05 20:33:21] [Rank 0] step:1521/10000 train_time:88195ms step_avg:57.99ms
+[2025-09-05 20:33:22] [Rank 0] step:1541/10000 train_time:88927ms step_avg:57.71ms
+[2025-09-05 20:33:23] [Rank 0] step:1561/10000 train_time:89801ms step_avg:57.53ms
+[2025-09-05 20:33:24] [Rank 0] step:1581/10000 train_time:90534ms step_avg:57.26ms
+[2025-09-05 20:33:24] [Rank 0] step:1601/10000 train_time:91270ms step_avg:57.01ms
+[2025-09-05 20:33:25] [Rank 0] step:1621/10000 train_time:92003ms step_avg:56.76ms
+[2025-09-05 20:33:26] [Rank 0] step:1641/10000 train_time:93365ms step_avg:56.90ms
+[2025-09-05 20:33:27] [Rank 0] step:1661/10000 train_time:94098ms step_avg:56.65ms
+[2025-09-05 20:33:28] [Rank 0] step:1681/10000 train_time:94829ms step_avg:56.41ms
+[2025-09-05 20:33:29] [Rank 0] step:1701/10000 train_time:95561ms step_avg:56.18ms
+[2025-09-05 20:33:29] [Rank 0] step:1721/10000 train_time:96294ms step_avg:55.95ms
+[2025-09-05 20:33:30] [Rank 0] step:1741/10000 train_time:97026ms step_avg:55.73ms
+[2025-09-05 20:33:31] [Rank 0] step:1761/10000 train_time:97758ms step_avg:55.51ms
+[2025-09-05 20:33:31] [Rank 0] step:1781/10000 train_time:98491ms step_avg:55.30ms
+[2025-09-05 20:33:32] [Rank 0] step:1801/10000 train_time:99223ms step_avg:55.09ms
+[2025-09-05 20:33:33] [Rank 0] step:1821/10000 train_time:99954ms step_avg:54.89ms
+[2025-09-05 20:33:34] [Rank 0] step:1841/10000 train_time:100686ms step_avg:54.69ms
+[2025-09-05 20:33:34] [Rank 0] step:1861/10000 train_time:101417ms step_avg:54.50ms
+[2025-09-05 20:33:35] [Rank 0] step:1881/10000 train_time:102148ms step_avg:54.31ms
+[2025-09-05 20:33:36] [Rank 0] step:1901/10000 train_time:102880ms step_avg:54.12ms
+[2025-09-05 20:33:37] [Rank 0] step:1921/10000 train_time:103613ms step_avg:53.94ms
+[2025-09-05 20:33:37] [Rank 0] step:1941/10000 train_time:104346ms step_avg:53.76ms
+[2025-09-05 20:33:38] [Rank 0] step:1961/10000 train_time:105080ms step_avg:53.58ms
+[2025-09-05 20:33:39] [Rank 0] step:1981/10000 train_time:105812ms step_avg:53.41ms
+[2025-09-05 20:33:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:33:40] [Rank 0] PRINT: step:2000/10000 train_loss:2.8578 val_loss:2.7379 train_time:106625ms step_avg:53.31ms
+[2025-09-05 20:33:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:33:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:35:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:35:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:35:01] [Rank 0] Total Loss: 5.1291
+[2025-09-05 20:35:01] [Rank 0] Total FTA (Unweighted): 0.1881
+[2025-09-05 20:35:01] [Rank 0] Total FTA (Weighted): 0.1881
+[2025-09-05 20:35:01] [Rank 0] Group 0 Loss: 3.3775
+[2025-09-05 20:35:01] [Rank 0] Group 1 Loss: 3.3218
+[2025-09-05 20:35:01] [Rank 0] Group 2 Loss: 3.4285
+[2025-09-05 20:35:01] [Rank 0] Group 3 Loss: 3.9452
+[2025-09-05 20:35:01] [Rank 0] Group 4 Loss: 4.6481
+[2025-09-05 20:35:01] [Rank 0] Group 5 Loss: 5.1332
+[2025-09-05 20:35:01] [Rank 0] Group 6 Loss: 5.4012
+[2025-09-05 20:35:01] [Rank 0] Group 7 Loss: 5.5221
+[2025-09-05 20:35:01] [Rank 0] Group 8 Loss: 5.7861
+[2025-09-05 20:35:01] [Rank 0] Group 9 Loss: 5.9340
+[2025-09-05 20:35:01] [Rank 0] Group 10 Loss: 5.9658
+[2025-09-05 20:35:01] [Rank 0] Group 11 Loss: 5.9891
+[2025-09-05 20:35:01] [Rank 0] Group 12 Loss: 5.8759
+[2025-09-05 20:35:01] [Rank 0] Group 13 Loss: 5.8835
+[2025-09-05 20:35:01] [Rank 0] Group 14 Loss: 5.9546
+[2025-09-05 20:35:01] [Rank 0] Group 15 Loss: 5.8985
+[2025-09-05 20:35:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:35:01] [Rank 0] Group 1 FTA: 0.3400
+[2025-09-05 20:35:01] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 20:35:01] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 20:35:01] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 20:35:01] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 20:35:01] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-05 20:35:01] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 20:35:01] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-05 20:35:01] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 20:35:01] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 20:35:01] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 20:35:01] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 20:35:01] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 20:35:01] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 20:35:01] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:35:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:35:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:35:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:35:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:35:03] [Rank 0] step:2001/10000 train_time:106634ms step_avg:53.29ms
+[2025-09-05 20:35:04] [Rank 0] step:2021/10000 train_time:107504ms step_avg:53.19ms
+[2025-09-05 20:35:04] [Rank 0] step:2041/10000 train_time:108237ms step_avg:53.03ms
+[2025-09-05 20:35:05] [Rank 0] step:2061/10000 train_time:108968ms step_avg:52.87ms
+[2025-09-05 20:35:06] [Rank 0] step:2081/10000 train_time:109701ms step_avg:52.72ms
+[2025-09-05 20:35:06] [Rank 0] step:2101/10000 train_time:110434ms step_avg:52.56ms
+[2025-09-05 20:35:07] [Rank 0] step:2121/10000 train_time:111165ms step_avg:52.41ms
+[2025-09-05 20:35:08] [Rank 0] step:2141/10000 train_time:111897ms step_avg:52.26ms
+[2025-09-05 20:35:09] [Rank 0] step:2161/10000 train_time:112630ms step_avg:52.12ms
+[2025-09-05 20:35:09] [Rank 0] step:2181/10000 train_time:113361ms step_avg:51.98ms
+[2025-09-05 20:35:10] [Rank 0] step:2201/10000 train_time:114092ms step_avg:51.84ms
+[2025-09-05 20:35:11] [Rank 0] step:2221/10000 train_time:114824ms step_avg:51.70ms
+[2025-09-05 20:35:12] [Rank 0] step:2241/10000 train_time:115561ms step_avg:51.57ms
+[2025-09-05 20:35:12] [Rank 0] step:2261/10000 train_time:116299ms step_avg:51.44ms
+[2025-09-05 20:35:13] [Rank 0] step:2281/10000 train_time:117037ms step_avg:51.31ms
+[2025-09-05 20:35:14] [Rank 0] step:2301/10000 train_time:117776ms step_avg:51.18ms
+[2025-09-05 20:35:15] [Rank 0] step:2321/10000 train_time:118515ms step_avg:51.06ms
+[2025-09-05 20:35:15] [Rank 0] step:2341/10000 train_time:119254ms step_avg:50.94ms
+[2025-09-05 20:35:16] [Rank 0] step:2361/10000 train_time:119994ms step_avg:50.82ms
+[2025-09-05 20:35:17] [Rank 0] step:2381/10000 train_time:120732ms step_avg:50.71ms
+[2025-09-05 20:35:17] [Rank 0] step:2401/10000 train_time:121471ms step_avg:50.59ms
+[2025-09-05 20:35:18] [Rank 0] step:2421/10000 train_time:122210ms step_avg:50.48ms
+[2025-09-05 20:35:19] [Rank 0] step:2441/10000 train_time:122949ms step_avg:50.37ms
+[2025-09-05 20:35:20] [Rank 0] step:2461/10000 train_time:123687ms step_avg:50.26ms
+[2025-09-05 20:35:20] [Rank 0] step:2481/10000 train_time:124425ms step_avg:50.15ms
+[2025-09-05 20:35:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:35:22] [Rank 0] PRINT: step:2500/10000 train_loss:2.6589 val_loss:2.5671 train_time:125244ms step_avg:50.10ms
+[2025-09-05 20:35:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:35:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:36:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:36:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:36:42] [Rank 0] Total Loss: 4.9940
+[2025-09-05 20:36:42] [Rank 0] Total FTA (Unweighted): 0.2238
+[2025-09-05 20:36:42] [Rank 0] Total FTA (Weighted): 0.2238
+[2025-09-05 20:36:42] [Rank 0] Group 0 Loss: 3.3660
+[2025-09-05 20:36:42] [Rank 0] Group 1 Loss: 3.2721
+[2025-09-05 20:36:42] [Rank 0] Group 2 Loss: 3.3842
+[2025-09-05 20:36:42] [Rank 0] Group 3 Loss: 3.8618
+[2025-09-05 20:36:42] [Rank 0] Group 4 Loss: 4.4115
+[2025-09-05 20:36:42] [Rank 0] Group 5 Loss: 4.9039
+[2025-09-05 20:36:42] [Rank 0] Group 6 Loss: 5.2205
+[2025-09-05 20:36:42] [Rank 0] Group 7 Loss: 5.3668
+[2025-09-05 20:36:42] [Rank 0] Group 8 Loss: 5.6475
+[2025-09-05 20:36:42] [Rank 0] Group 9 Loss: 5.7578
+[2025-09-05 20:36:42] [Rank 0] Group 10 Loss: 5.8066
+[2025-09-05 20:36:42] [Rank 0] Group 11 Loss: 5.8364
+[2025-09-05 20:36:42] [Rank 0] Group 12 Loss: 5.7331
+[2025-09-05 20:36:43] [Rank 0] Group 13 Loss: 5.7522
+[2025-09-05 20:36:43] [Rank 0] Group 14 Loss: 5.8362
+[2025-09-05 20:36:43] [Rank 0] Group 15 Loss: 5.7476
+[2025-09-05 20:36:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:36:43] [Rank 0] Group 1 FTA: 0.8300
+[2025-09-05 20:36:43] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 20:36:43] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:36:43] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 20:36:43] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 20:36:43] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 20:36:43] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-05 20:36:43] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-05 20:36:43] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 20:36:43] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 20:36:43] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 20:36:43] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 20:36:43] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 20:36:43] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 20:36:43] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:36:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:36:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:36:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:36:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:36:44] [Rank 0] step:2501/10000 train_time:125254ms step_avg:50.08ms
+[2025-09-05 20:36:45] [Rank 0] step:2521/10000 train_time:125929ms step_avg:49.95ms
+[2025-09-05 20:36:46] [Rank 0] step:2541/10000 train_time:126667ms step_avg:49.85ms
+[2025-09-05 20:36:46] [Rank 0] step:2561/10000 train_time:127406ms step_avg:49.75ms
+[2025-09-05 20:36:47] [Rank 0] step:2581/10000 train_time:128145ms step_avg:49.65ms
+[2025-09-05 20:36:48] [Rank 0] step:2601/10000 train_time:128883ms step_avg:49.55ms
+[2025-09-05 20:36:48] [Rank 0] step:2621/10000 train_time:129622ms step_avg:49.46ms
+[2025-09-05 20:36:49] [Rank 0] step:2641/10000 train_time:130360ms step_avg:49.36ms
+[2025-09-05 20:36:50] [Rank 0] step:2661/10000 train_time:131099ms step_avg:49.27ms
+[2025-09-05 20:36:51] [Rank 0] step:2681/10000 train_time:131838ms step_avg:49.17ms
+[2025-09-05 20:36:51] [Rank 0] step:2701/10000 train_time:132576ms step_avg:49.08ms
+[2025-09-05 20:36:52] [Rank 0] step:2721/10000 train_time:133315ms step_avg:48.99ms
+[2025-09-05 20:36:53] [Rank 0] step:2741/10000 train_time:134053ms step_avg:48.91ms
+[2025-09-05 20:36:54] [Rank 0] step:2761/10000 train_time:134793ms step_avg:48.82ms
+[2025-09-05 20:36:54] [Rank 0] step:2781/10000 train_time:135532ms step_avg:48.73ms
+[2025-09-05 20:36:55] [Rank 0] step:2801/10000 train_time:136271ms step_avg:48.65ms
+[2025-09-05 20:36:57] [Rank 0] step:2821/10000 train_time:137635ms step_avg:48.79ms
+[2025-09-05 20:36:57] [Rank 0] step:2841/10000 train_time:138374ms step_avg:48.71ms
+[2025-09-05 20:36:58] [Rank 0] step:2861/10000 train_time:139113ms step_avg:48.62ms
+[2025-09-05 20:36:59] [Rank 0] step:2881/10000 train_time:139852ms step_avg:48.54ms
+[2025-09-05 20:36:59] [Rank 0] step:2901/10000 train_time:140591ms step_avg:48.46ms
+[2025-09-05 20:37:00] [Rank 0] step:2921/10000 train_time:141338ms step_avg:48.39ms
+[2025-09-05 20:37:01] [Rank 0] step:2941/10000 train_time:142077ms step_avg:48.31ms
+[2025-09-05 20:37:02] [Rank 0] step:2961/10000 train_time:142816ms step_avg:48.23ms
+[2025-09-05 20:37:02] [Rank 0] step:2981/10000 train_time:143555ms step_avg:48.16ms
+[2025-09-05 20:37:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:37:04] [Rank 0] PRINT: step:3000/10000 train_loss:2.5117 val_loss:2.4458 train_time:144375ms step_avg:48.12ms
+[2025-09-05 20:37:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:37:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:38:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:38:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:38:24] [Rank 0] Total Loss: 4.8729
+[2025-09-05 20:38:24] [Rank 0] Total FTA (Unweighted): 0.2500
+[2025-09-05 20:38:24] [Rank 0] Total FTA (Weighted): 0.2500
+[2025-09-05 20:38:24] [Rank 0] Group 0 Loss: 3.3533
+[2025-09-05 20:38:24] [Rank 0] Group 1 Loss: 3.2517
+[2025-09-05 20:38:24] [Rank 0] Group 2 Loss: 3.3143
+[2025-09-05 20:38:24] [Rank 0] Group 3 Loss: 3.7756
+[2025-09-05 20:38:24] [Rank 0] Group 4 Loss: 4.2724
+[2025-09-05 20:38:24] [Rank 0] Group 5 Loss: 4.7392
+[2025-09-05 20:38:24] [Rank 0] Group 6 Loss: 5.0393
+[2025-09-05 20:38:24] [Rank 0] Group 7 Loss: 5.2052
+[2025-09-05 20:38:25] [Rank 0] Group 8 Loss: 5.4969
+[2025-09-05 20:38:25] [Rank 0] Group 9 Loss: 5.6121
+[2025-09-05 20:38:25] [Rank 0] Group 10 Loss: 5.6574
+[2025-09-05 20:38:25] [Rank 0] Group 11 Loss: 5.6904
+[2025-09-05 20:38:25] [Rank 0] Group 12 Loss: 5.6225
+[2025-09-05 20:38:25] [Rank 0] Group 13 Loss: 5.6241
+[2025-09-05 20:38:25] [Rank 0] Group 14 Loss: 5.6903
+[2025-09-05 20:38:25] [Rank 0] Group 15 Loss: 5.6224
+[2025-09-05 20:38:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:38:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:38:25] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 20:38:25] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:38:25] [Rank 0] Group 4 FTA: 0.1700
+[2025-09-05 20:38:25] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 20:38:25] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-05 20:38:25] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 20:38:25] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 20:38:25] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 20:38:25] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-05 20:38:25] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 20:38:25] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 20:38:25] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 20:38:25] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 20:38:25] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:38:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:38:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:38:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:38:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:38:26] [Rank 0] step:3001/10000 train_time:144384ms step_avg:48.11ms
+[2025-09-05 20:38:27] [Rank 0] step:3021/10000 train_time:145056ms step_avg:48.02ms
+[2025-09-05 20:38:27] [Rank 0] step:3041/10000 train_time:145794ms step_avg:47.94ms
+[2025-09-05 20:38:28] [Rank 0] step:3061/10000 train_time:146533ms step_avg:47.87ms
+[2025-09-05 20:38:29] [Rank 0] step:3081/10000 train_time:147272ms step_avg:47.80ms
+[2025-09-05 20:38:30] [Rank 0] step:3101/10000 train_time:148011ms step_avg:47.73ms
+[2025-09-05 20:38:30] [Rank 0] step:3121/10000 train_time:148749ms step_avg:47.66ms
+[2025-09-05 20:38:31] [Rank 0] step:3141/10000 train_time:149488ms step_avg:47.59ms
+[2025-09-05 20:38:32] [Rank 0] step:3161/10000 train_time:150227ms step_avg:47.53ms
+[2025-09-05 20:38:33] [Rank 0] step:3181/10000 train_time:150965ms step_avg:47.46ms
+[2025-09-05 20:38:33] [Rank 0] step:3201/10000 train_time:151703ms step_avg:47.39ms
+[2025-09-05 20:38:34] [Rank 0] step:3221/10000 train_time:152442ms step_avg:47.33ms
+[2025-09-05 20:38:35] [Rank 0] step:3241/10000 train_time:153181ms step_avg:47.26ms
+[2025-09-05 20:38:36] [Rank 0] step:3261/10000 train_time:153920ms step_avg:47.20ms
+[2025-09-05 20:38:37] [Rank 0] step:3281/10000 train_time:154803ms step_avg:47.18ms
+[2025-09-05 20:38:37] [Rank 0] step:3301/10000 train_time:155542ms step_avg:47.12ms
+[2025-09-05 20:38:38] [Rank 0] step:3321/10000 train_time:156281ms step_avg:47.06ms
+[2025-09-05 20:38:39] [Rank 0] step:3341/10000 train_time:157176ms step_avg:47.04ms
+[2025-09-05 20:38:40] [Rank 0] step:3361/10000 train_time:157913ms step_avg:46.98ms
+[2025-09-05 20:38:40] [Rank 0] step:3381/10000 train_time:158652ms step_avg:46.92ms
+[2025-09-05 20:38:41] [Rank 0] step:3401/10000 train_time:159392ms step_avg:46.87ms
+[2025-09-05 20:38:42] [Rank 0] step:3421/10000 train_time:160130ms step_avg:46.81ms
+[2025-09-05 20:38:43] [Rank 0] step:3441/10000 train_time:160868ms step_avg:46.75ms
+[2025-09-05 20:38:43] [Rank 0] step:3461/10000 train_time:161607ms step_avg:46.69ms
+[2025-09-05 20:38:44] [Rank 0] step:3481/10000 train_time:162345ms step_avg:46.64ms
+[2025-09-05 20:38:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:38:45] [Rank 0] PRINT: step:3500/10000 train_loss:2.4092 val_loss:2.3586 train_time:163165ms step_avg:46.62ms
+[2025-09-05 20:38:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:38:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:40:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:40:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:40:06] [Rank 0] Total Loss: 4.8766
+[2025-09-05 20:40:06] [Rank 0] Total FTA (Unweighted): 0.2619
+[2025-09-05 20:40:06] [Rank 0] Total FTA (Weighted): 0.2619
+[2025-09-05 20:40:06] [Rank 0] Group 0 Loss: 3.3679
+[2025-09-05 20:40:06] [Rank 0] Group 1 Loss: 3.3181
+[2025-09-05 20:40:06] [Rank 0] Group 2 Loss: 3.4000
+[2025-09-05 20:40:06] [Rank 0] Group 3 Loss: 3.7996
+[2025-09-05 20:40:06] [Rank 0] Group 4 Loss: 4.2571
+[2025-09-05 20:40:06] [Rank 0] Group 5 Loss: 4.7138
+[2025-09-05 20:40:06] [Rank 0] Group 6 Loss: 5.0569
+[2025-09-05 20:40:06] [Rank 0] Group 7 Loss: 5.1899
+[2025-09-05 20:40:06] [Rank 0] Group 8 Loss: 5.4740
+[2025-09-05 20:40:06] [Rank 0] Group 9 Loss: 5.5971
+[2025-09-05 20:40:06] [Rank 0] Group 10 Loss: 5.6617
+[2025-09-05 20:40:06] [Rank 0] Group 11 Loss: 5.6958
+[2025-09-05 20:40:06] [Rank 0] Group 12 Loss: 5.5842
+[2025-09-05 20:40:06] [Rank 0] Group 13 Loss: 5.6190
+[2025-09-05 20:40:06] [Rank 0] Group 14 Loss: 5.6764
+[2025-09-05 20:40:06] [Rank 0] Group 15 Loss: 5.6138
+[2025-09-05 20:40:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:40:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:40:06] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 20:40:06] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:40:06] [Rank 0] Group 4 FTA: 0.1700
+[2025-09-05 20:40:06] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 20:40:06] [Rank 0] Group 6 FTA: 0.1700
+[2025-09-05 20:40:06] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 20:40:06] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 20:40:06] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 20:40:06] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-05 20:40:06] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 20:40:06] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 20:40:06] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 20:40:06] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 20:40:06] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:40:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:40:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:40:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:40:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:40:07] [Rank 0] step:3501/10000 train_time:163175ms step_avg:46.61ms
+[2025-09-05 20:40:08] [Rank 0] step:3521/10000 train_time:163845ms step_avg:46.53ms
+[2025-09-05 20:40:09] [Rank 0] step:3541/10000 train_time:164583ms step_avg:46.48ms
+[2025-09-05 20:40:10] [Rank 0] step:3561/10000 train_time:165321ms step_avg:46.43ms
+[2025-09-05 20:40:10] [Rank 0] step:3581/10000 train_time:166059ms step_avg:46.37ms
+[2025-09-05 20:40:11] [Rank 0] step:3601/10000 train_time:166797ms step_avg:46.32ms
+[2025-09-05 20:40:12] [Rank 0] step:3621/10000 train_time:167536ms step_avg:46.27ms
+[2025-09-05 20:40:13] [Rank 0] step:3641/10000 train_time:168894ms step_avg:46.39ms
+[2025-09-05 20:40:14] [Rank 0] step:3661/10000 train_time:169632ms step_avg:46.33ms
+[2025-09-05 20:40:15] [Rank 0] step:3681/10000 train_time:170370ms step_avg:46.28ms
+[2025-09-05 20:40:15] [Rank 0] step:3701/10000 train_time:171108ms step_avg:46.23ms
+[2025-09-05 20:40:16] [Rank 0] step:3721/10000 train_time:171847ms step_avg:46.18ms
+[2025-09-05 20:40:17] [Rank 0] step:3741/10000 train_time:172585ms step_avg:46.13ms
+[2025-09-05 20:40:18] [Rank 0] step:3761/10000 train_time:173323ms step_avg:46.08ms
+[2025-09-05 20:40:18] [Rank 0] step:3781/10000 train_time:174062ms step_avg:46.04ms
+[2025-09-05 20:40:19] [Rank 0] step:3801/10000 train_time:174800ms step_avg:45.99ms
+[2025-09-05 20:40:20] [Rank 0] step:3821/10000 train_time:175539ms step_avg:45.94ms
+[2025-09-05 20:40:20] [Rank 0] step:3841/10000 train_time:176278ms step_avg:45.89ms
+[2025-09-05 20:40:21] [Rank 0] step:3861/10000 train_time:177016ms step_avg:45.85ms
+[2025-09-05 20:40:22] [Rank 0] step:3881/10000 train_time:177754ms step_avg:45.80ms
+[2025-09-05 20:40:23] [Rank 0] step:3901/10000 train_time:178492ms step_avg:45.76ms
+[2025-09-05 20:40:23] [Rank 0] step:3921/10000 train_time:179231ms step_avg:45.71ms
+[2025-09-05 20:40:24] [Rank 0] step:3941/10000 train_time:179970ms step_avg:45.67ms
+[2025-09-05 20:40:25] [Rank 0] step:3961/10000 train_time:180709ms step_avg:45.62ms
+[2025-09-05 20:40:26] [Rank 0] step:3981/10000 train_time:181448ms step_avg:45.58ms
+[2025-09-05 20:40:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:40:27] [Rank 0] PRINT: step:4000/10000 train_loss:2.3306 val_loss:2.2861 train_time:182268ms step_avg:45.57ms
+[2025-09-05 20:40:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:40:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:41:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:41:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:41:48] [Rank 0] Total Loss: 4.8114
+[2025-09-05 20:41:48] [Rank 0] Total FTA (Unweighted): 0.2850
+[2025-09-05 20:41:48] [Rank 0] Total FTA (Weighted): 0.2850
+[2025-09-05 20:41:48] [Rank 0] Group 0 Loss: 3.4006
+[2025-09-05 20:41:48] [Rank 0] Group 1 Loss: 3.3130
+[2025-09-05 20:41:48] [Rank 0] Group 2 Loss: 3.3536
+[2025-09-05 20:41:48] [Rank 0] Group 3 Loss: 3.7759
+[2025-09-05 20:41:48] [Rank 0] Group 4 Loss: 4.1846
+[2025-09-05 20:41:48] [Rank 0] Group 5 Loss: 4.6229
+[2025-09-05 20:41:48] [Rank 0] Group 6 Loss: 4.9349
+[2025-09-05 20:41:48] [Rank 0] Group 7 Loss: 5.0927
+[2025-09-05 20:41:48] [Rank 0] Group 8 Loss: 5.3833
+[2025-09-05 20:41:48] [Rank 0] Group 9 Loss: 5.5202
+[2025-09-05 20:41:48] [Rank 0] Group 10 Loss: 5.5930
+[2025-09-05 20:41:48] [Rank 0] Group 11 Loss: 5.6171
+[2025-09-05 20:41:48] [Rank 0] Group 12 Loss: 5.5070
+[2025-09-05 20:41:48] [Rank 0] Group 13 Loss: 5.5390
+[2025-09-05 20:41:48] [Rank 0] Group 14 Loss: 5.5915
+[2025-09-05 20:41:48] [Rank 0] Group 15 Loss: 5.5528
+[2025-09-05 20:41:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:41:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:41:48] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 20:41:48] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:41:48] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:41:48] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 20:41:48] [Rank 0] Group 6 FTA: 0.2500
+[2025-09-05 20:41:48] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 20:41:48] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 20:41:48] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 20:41:48] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 20:41:48] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 20:41:48] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 20:41:48] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 20:41:48] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 20:41:48] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 20:41:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:41:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:41:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:41:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:41:49] [Rank 0] step:4001/10000 train_time:182277ms step_avg:45.56ms
+[2025-09-05 20:41:51] [Rank 0] step:4021/10000 train_time:183572ms step_avg:45.65ms
+[2025-09-05 20:41:51] [Rank 0] step:4041/10000 train_time:184310ms step_avg:45.61ms
+[2025-09-05 20:41:52] [Rank 0] step:4061/10000 train_time:185049ms step_avg:45.57ms
+[2025-09-05 20:41:53] [Rank 0] step:4081/10000 train_time:185788ms step_avg:45.53ms
+[2025-09-05 20:41:54] [Rank 0] step:4101/10000 train_time:186526ms step_avg:45.48ms
+[2025-09-05 20:41:54] [Rank 0] step:4121/10000 train_time:187266ms step_avg:45.44ms
+[2025-09-05 20:41:55] [Rank 0] step:4141/10000 train_time:188003ms step_avg:45.40ms
+[2025-09-05 20:41:56] [Rank 0] step:4161/10000 train_time:188742ms step_avg:45.36ms
+[2025-09-05 20:41:57] [Rank 0] step:4181/10000 train_time:189480ms step_avg:45.32ms
+[2025-09-05 20:41:57] [Rank 0] step:4201/10000 train_time:190219ms step_avg:45.28ms
+[2025-09-05 20:41:58] [Rank 0] step:4221/10000 train_time:190958ms step_avg:45.24ms
+[2025-09-05 20:41:59] [Rank 0] step:4241/10000 train_time:191697ms step_avg:45.20ms
+[2025-09-05 20:42:00] [Rank 0] step:4261/10000 train_time:192436ms step_avg:45.16ms
+[2025-09-05 20:42:00] [Rank 0] step:4281/10000 train_time:193174ms step_avg:45.12ms
+[2025-09-05 20:42:01] [Rank 0] step:4301/10000 train_time:193913ms step_avg:45.09ms
+[2025-09-05 20:42:02] [Rank 0] step:4321/10000 train_time:194652ms step_avg:45.05ms
+[2025-09-05 20:42:02] [Rank 0] step:4341/10000 train_time:195391ms step_avg:45.01ms
+[2025-09-05 20:42:03] [Rank 0] step:4361/10000 train_time:196131ms step_avg:44.97ms
+[2025-09-05 20:42:04] [Rank 0] step:4381/10000 train_time:196871ms step_avg:44.94ms
+[2025-09-05 20:42:05] [Rank 0] step:4401/10000 train_time:197610ms step_avg:44.90ms
+[2025-09-05 20:42:05] [Rank 0] step:4421/10000 train_time:198349ms step_avg:44.87ms
+[2025-09-05 20:42:06] [Rank 0] step:4441/10000 train_time:199088ms step_avg:44.83ms
+[2025-09-05 20:42:07] [Rank 0] step:4461/10000 train_time:199827ms step_avg:44.79ms
+[2025-09-05 20:42:08] [Rank 0] step:4481/10000 train_time:200566ms step_avg:44.76ms
+[2025-09-05 20:42:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:42:09] [Rank 0] PRINT: step:4500/10000 train_loss:2.2689 val_loss:2.2326 train_time:201385ms step_avg:44.75ms
+[2025-09-05 20:42:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:42:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:43:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:43:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:43:30] [Rank 0] Total Loss: 4.7728
+[2025-09-05 20:43:30] [Rank 0] Total FTA (Unweighted): 0.2944
+[2025-09-05 20:43:30] [Rank 0] Total FTA (Weighted): 0.2944
+[2025-09-05 20:43:30] [Rank 0] Group 0 Loss: 3.4028
+[2025-09-05 20:43:30] [Rank 0] Group 1 Loss: 3.3035
+[2025-09-05 20:43:30] [Rank 0] Group 2 Loss: 3.3694
+[2025-09-05 20:43:30] [Rank 0] Group 3 Loss: 3.7544
+[2025-09-05 20:43:30] [Rank 0] Group 4 Loss: 4.1194
+[2025-09-05 20:43:30] [Rank 0] Group 5 Loss: 4.5559
+[2025-09-05 20:43:30] [Rank 0] Group 6 Loss: 4.8668
+[2025-09-05 20:43:30] [Rank 0] Group 7 Loss: 5.0364
+[2025-09-05 20:43:30] [Rank 0] Group 8 Loss: 5.3442
+[2025-09-05 20:43:30] [Rank 0] Group 9 Loss: 5.4687
+[2025-09-05 20:43:30] [Rank 0] Group 10 Loss: 5.5260
+[2025-09-05 20:43:30] [Rank 0] Group 11 Loss: 5.5639
+[2025-09-05 20:43:30] [Rank 0] Group 12 Loss: 5.4869
+[2025-09-05 20:43:30] [Rank 0] Group 13 Loss: 5.5081
+[2025-09-05 20:43:30] [Rank 0] Group 14 Loss: 5.5477
+[2025-09-05 20:43:30] [Rank 0] Group 15 Loss: 5.5108
+[2025-09-05 20:43:30] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:43:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:43:30] [Rank 0] Group 2 FTA: 0.4200
+[2025-09-05 20:43:30] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:43:30] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:43:30] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 20:43:30] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:43:30] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 20:43:30] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 20:43:30] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 20:43:30] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 20:43:30] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 20:43:30] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 20:43:30] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 20:43:30] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 20:43:30] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 20:43:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:43:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:43:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:43:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:43:31] [Rank 0] step:4501/10000 train_time:201394ms step_avg:44.74ms
+[2025-09-05 20:43:32] [Rank 0] step:4521/10000 train_time:202076ms step_avg:44.70ms
+[2025-09-05 20:43:33] [Rank 0] step:4541/10000 train_time:202814ms step_avg:44.66ms
+[2025-09-05 20:43:34] [Rank 0] step:4561/10000 train_time:203552ms step_avg:44.63ms
+[2025-09-05 20:43:34] [Rank 0] step:4581/10000 train_time:204291ms step_avg:44.60ms
+[2025-09-05 20:43:35] [Rank 0] step:4601/10000 train_time:205029ms step_avg:44.56ms
+[2025-09-05 20:43:36] [Rank 0] step:4621/10000 train_time:205772ms step_avg:44.53ms
+[2025-09-05 20:43:37] [Rank 0] step:4641/10000 train_time:206510ms step_avg:44.50ms
+[2025-09-05 20:43:37] [Rank 0] step:4661/10000 train_time:207250ms step_avg:44.46ms
+[2025-09-05 20:43:38] [Rank 0] step:4681/10000 train_time:207989ms step_avg:44.43ms
+[2025-09-05 20:43:39] [Rank 0] step:4701/10000 train_time:208727ms step_avg:44.40ms
+[2025-09-05 20:43:40] [Rank 0] step:4721/10000 train_time:209466ms step_avg:44.37ms
+[2025-09-05 20:43:40] [Rank 0] step:4741/10000 train_time:210206ms step_avg:44.34ms
+[2025-09-05 20:43:41] [Rank 0] step:4761/10000 train_time:210944ms step_avg:44.31ms
+[2025-09-05 20:43:42] [Rank 0] step:4781/10000 train_time:211684ms step_avg:44.28ms
+[2025-09-05 20:43:43] [Rank 0] step:4801/10000 train_time:212422ms step_avg:44.25ms
+[2025-09-05 20:43:43] [Rank 0] step:4821/10000 train_time:213161ms step_avg:44.22ms
+[2025-09-05 20:43:44] [Rank 0] step:4841/10000 train_time:214206ms step_avg:44.25ms
+[2025-09-05 20:43:45] [Rank 0] step:4861/10000 train_time:214944ms step_avg:44.22ms
+[2025-09-05 20:43:46] [Rank 0] step:4881/10000 train_time:215683ms step_avg:44.19ms
+[2025-09-05 20:43:47] [Rank 0] step:4901/10000 train_time:216422ms step_avg:44.16ms
+[2025-09-05 20:43:47] [Rank 0] step:4921/10000 train_time:217162ms step_avg:44.13ms
+[2025-09-05 20:43:48] [Rank 0] step:4941/10000 train_time:217901ms step_avg:44.10ms
+[2025-09-05 20:43:49] [Rank 0] step:4961/10000 train_time:218640ms step_avg:44.07ms
+[2025-09-05 20:43:50] [Rank 0] step:4981/10000 train_time:219379ms step_avg:44.04ms
+[2025-09-05 20:43:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:43:51] [Rank 0] PRINT: step:5000/10000 train_loss:2.2214 val_loss:2.1917 train_time:220199ms step_avg:44.04ms
+[2025-09-05 20:43:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:43:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:45:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:45:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:45:12] [Rank 0] Total Loss: 4.7216
+[2025-09-05 20:45:12] [Rank 0] Total FTA (Unweighted): 0.2906
+[2025-09-05 20:45:12] [Rank 0] Total FTA (Weighted): 0.2906
+[2025-09-05 20:45:12] [Rank 0] Group 0 Loss: 3.3945
+[2025-09-05 20:45:12] [Rank 0] Group 1 Loss: 3.3197
+[2025-09-05 20:45:12] [Rank 0] Group 2 Loss: 3.3423
+[2025-09-05 20:45:12] [Rank 0] Group 3 Loss: 3.7226
+[2025-09-05 20:45:12] [Rank 0] Group 4 Loss: 4.0773
+[2025-09-05 20:45:12] [Rank 0] Group 5 Loss: 4.4982
+[2025-09-05 20:45:12] [Rank 0] Group 6 Loss: 4.8068
+[2025-09-05 20:45:12] [Rank 0] Group 7 Loss: 4.9741
+[2025-09-05 20:45:12] [Rank 0] Group 8 Loss: 5.2739
+[2025-09-05 20:45:12] [Rank 0] Group 9 Loss: 5.3938
+[2025-09-05 20:45:12] [Rank 0] Group 10 Loss: 5.4729
+[2025-09-05 20:45:12] [Rank 0] Group 11 Loss: 5.4881
+[2025-09-05 20:45:12] [Rank 0] Group 12 Loss: 5.4224
+[2025-09-05 20:45:12] [Rank 0] Group 13 Loss: 5.4458
+[2025-09-05 20:45:12] [Rank 0] Group 14 Loss: 5.4718
+[2025-09-05 20:45:12] [Rank 0] Group 15 Loss: 5.4410
+[2025-09-05 20:45:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:45:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:45:12] [Rank 0] Group 2 FTA: 0.3300
+[2025-09-05 20:45:12] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:45:12] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:45:12] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 20:45:12] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:45:12] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 20:45:12] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 20:45:12] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 20:45:12] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 20:45:12] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-05 20:45:12] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 20:45:12] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 20:45:12] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 20:45:12] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 20:45:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:45:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:45:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:45:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:45:14] [Rank 0] step:5001/10000 train_time:220208ms step_avg:44.03ms
+[2025-09-05 20:45:14] [Rank 0] step:5021/10000 train_time:220872ms step_avg:43.99ms
+[2025-09-05 20:45:15] [Rank 0] step:5041/10000 train_time:221611ms step_avg:43.96ms
+[2025-09-05 20:45:16] [Rank 0] step:5061/10000 train_time:222351ms step_avg:43.93ms
+[2025-09-05 20:45:17] [Rank 0] step:5081/10000 train_time:223090ms step_avg:43.91ms
+[2025-09-05 20:45:17] [Rank 0] step:5101/10000 train_time:223830ms step_avg:43.88ms
+[2025-09-05 20:45:18] [Rank 0] step:5121/10000 train_time:224570ms step_avg:43.85ms
+[2025-09-05 20:45:19] [Rank 0] step:5141/10000 train_time:225309ms step_avg:43.83ms
+[2025-09-05 20:45:20] [Rank 0] step:5161/10000 train_time:226049ms step_avg:43.80ms
+[2025-09-05 20:45:20] [Rank 0] step:5181/10000 train_time:226788ms step_avg:43.77ms
+[2025-09-05 20:45:21] [Rank 0] step:5201/10000 train_time:227526ms step_avg:43.75ms
+[2025-09-05 20:45:22] [Rank 0] step:5221/10000 train_time:228265ms step_avg:43.72ms
+[2025-09-05 20:45:23] [Rank 0] step:5241/10000 train_time:229003ms step_avg:43.69ms
+[2025-09-05 20:45:23] [Rank 0] step:5261/10000 train_time:229742ms step_avg:43.67ms
+[2025-09-05 20:45:24] [Rank 0] step:5281/10000 train_time:230481ms step_avg:43.64ms
+[2025-09-05 20:45:25] [Rank 0] step:5301/10000 train_time:231219ms step_avg:43.62ms
+[2025-09-05 20:45:26] [Rank 0] step:5321/10000 train_time:231958ms step_avg:43.59ms
+[2025-09-05 20:45:26] [Rank 0] step:5341/10000 train_time:232697ms step_avg:43.57ms
+[2025-09-05 20:45:27] [Rank 0] step:5361/10000 train_time:233436ms step_avg:43.54ms
+[2025-09-05 20:45:28] [Rank 0] step:5381/10000 train_time:234176ms step_avg:43.52ms
+[2025-09-05 20:45:28] [Rank 0] step:5401/10000 train_time:234914ms step_avg:43.49ms
+[2025-09-05 20:45:29] [Rank 0] step:5421/10000 train_time:235653ms step_avg:43.47ms
+[2025-09-05 20:45:30] [Rank 0] step:5441/10000 train_time:236392ms step_avg:43.45ms
+[2025-09-05 20:45:31] [Rank 0] step:5461/10000 train_time:237132ms step_avg:43.42ms
+[2025-09-05 20:45:31] [Rank 0] step:5481/10000 train_time:237871ms step_avg:43.40ms
+[2025-09-05 20:45:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:45:33] [Rank 0] PRINT: step:5500/10000 train_loss:2.1816 val_loss:2.1554 train_time:238690ms step_avg:43.40ms
+[2025-09-05 20:45:33] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:45:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:46:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:46:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:46:54] [Rank 0] Total Loss: 4.7103
+[2025-09-05 20:46:54] [Rank 0] Total FTA (Unweighted): 0.3025
+[2025-09-05 20:46:54] [Rank 0] Total FTA (Weighted): 0.3025
+[2025-09-05 20:46:54] [Rank 0] Group 0 Loss: 3.3927
+[2025-09-05 20:46:54] [Rank 0] Group 1 Loss: 3.3200
+[2025-09-05 20:46:54] [Rank 0] Group 2 Loss: 3.2926
+[2025-09-05 20:46:54] [Rank 0] Group 3 Loss: 3.7388
+[2025-09-05 20:46:54] [Rank 0] Group 4 Loss: 4.1234
+[2025-09-05 20:46:54] [Rank 0] Group 5 Loss: 4.4946
+[2025-09-05 20:46:54] [Rank 0] Group 6 Loss: 4.7806
+[2025-09-05 20:46:54] [Rank 0] Group 7 Loss: 4.9571
+[2025-09-05 20:46:54] [Rank 0] Group 8 Loss: 5.2678
+[2025-09-05 20:46:54] [Rank 0] Group 9 Loss: 5.3844
+[2025-09-05 20:46:54] [Rank 0] Group 10 Loss: 5.4370
+[2025-09-05 20:46:54] [Rank 0] Group 11 Loss: 5.4761
+[2025-09-05 20:46:54] [Rank 0] Group 12 Loss: 5.3903
+[2025-09-05 20:46:54] [Rank 0] Group 13 Loss: 5.4283
+[2025-09-05 20:46:54] [Rank 0] Group 14 Loss: 5.4543
+[2025-09-05 20:46:54] [Rank 0] Group 15 Loss: 5.4267
+[2025-09-05 20:46:54] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:46:54] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:46:54] [Rank 0] Group 2 FTA: 0.5100
+[2025-09-05 20:46:54] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:46:54] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:46:54] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 20:46:54] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:46:54] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 20:46:54] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 20:46:54] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 20:46:54] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 20:46:54] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 20:46:54] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 20:46:54] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 20:46:54] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 20:46:54] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 20:46:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:46:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:46:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:46:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:46:55] [Rank 0] step:5501/10000 train_time:238699ms step_avg:43.39ms
+[2025-09-05 20:46:56] [Rank 0] step:5521/10000 train_time:239380ms step_avg:43.36ms
+[2025-09-05 20:46:57] [Rank 0] step:5541/10000 train_time:240118ms step_avg:43.33ms
+[2025-09-05 20:46:58] [Rank 0] step:5561/10000 train_time:240857ms step_avg:43.31ms
+[2025-09-05 20:46:58] [Rank 0] step:5581/10000 train_time:241595ms step_avg:43.29ms
+[2025-09-05 20:46:59] [Rank 0] step:5601/10000 train_time:242333ms step_avg:43.27ms
+[2025-09-05 20:47:00] [Rank 0] step:5621/10000 train_time:243072ms step_avg:43.24ms
+[2025-09-05 20:47:01] [Rank 0] step:5641/10000 train_time:244414ms step_avg:43.33ms
+[2025-09-05 20:47:02] [Rank 0] step:5661/10000 train_time:245359ms step_avg:43.34ms
+[2025-09-05 20:47:03] [Rank 0] step:5681/10000 train_time:246099ms step_avg:43.32ms
+[2025-09-05 20:47:03] [Rank 0] step:5701/10000 train_time:246839ms step_avg:43.30ms
+[2025-09-05 20:47:04] [Rank 0] step:5721/10000 train_time:247727ms step_avg:43.30ms
+[2025-09-05 20:47:05] [Rank 0] step:5741/10000 train_time:248465ms step_avg:43.28ms
+[2025-09-05 20:47:06] [Rank 0] step:5761/10000 train_time:249205ms step_avg:43.26ms
+[2025-09-05 20:47:07] [Rank 0] step:5781/10000 train_time:249944ms step_avg:43.24ms
+[2025-09-05 20:47:07] [Rank 0] step:5801/10000 train_time:250683ms step_avg:43.21ms
+[2025-09-05 20:47:08] [Rank 0] step:5821/10000 train_time:251422ms step_avg:43.19ms
+[2025-09-05 20:47:09] [Rank 0] step:5841/10000 train_time:252161ms step_avg:43.17ms
+[2025-09-05 20:47:10] [Rank 0] step:5861/10000 train_time:252899ms step_avg:43.15ms
+[2025-09-05 20:47:10] [Rank 0] step:5881/10000 train_time:253639ms step_avg:43.13ms
+[2025-09-05 20:47:11] [Rank 0] step:5901/10000 train_time:254377ms step_avg:43.11ms
+[2025-09-05 20:47:12] [Rank 0] step:5921/10000 train_time:255115ms step_avg:43.09ms
+[2025-09-05 20:47:13] [Rank 0] step:5941/10000 train_time:255854ms step_avg:43.07ms
+[2025-09-05 20:47:13] [Rank 0] step:5961/10000 train_time:256593ms step_avg:43.05ms
+[2025-09-05 20:47:14] [Rank 0] step:5981/10000 train_time:257332ms step_avg:43.02ms
+[2025-09-05 20:47:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:47:15] [Rank 0] PRINT: step:6000/10000 train_loss:2.1500 val_loss:2.1265 train_time:258151ms step_avg:43.03ms
+[2025-09-05 20:47:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:47:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:48:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:48:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:48:36] [Rank 0] Total Loss: 4.7000
+[2025-09-05 20:48:36] [Rank 0] Total FTA (Unweighted): 0.3163
+[2025-09-05 20:48:36] [Rank 0] Total FTA (Weighted): 0.3162
+[2025-09-05 20:48:36] [Rank 0] Group 0 Loss: 3.4138
+[2025-09-05 20:48:36] [Rank 0] Group 1 Loss: 3.3354
+[2025-09-05 20:48:36] [Rank 0] Group 2 Loss: 3.3507
+[2025-09-05 20:48:36] [Rank 0] Group 3 Loss: 3.7667
+[2025-09-05 20:48:36] [Rank 0] Group 4 Loss: 4.0757
+[2025-09-05 20:48:36] [Rank 0] Group 5 Loss: 4.4626
+[2025-09-05 20:48:36] [Rank 0] Group 6 Loss: 4.7540
+[2025-09-05 20:48:36] [Rank 0] Group 7 Loss: 4.9248
+[2025-09-05 20:48:36] [Rank 0] Group 8 Loss: 5.2333
+[2025-09-05 20:48:36] [Rank 0] Group 9 Loss: 5.3601
+[2025-09-05 20:48:36] [Rank 0] Group 10 Loss: 5.4369
+[2025-09-05 20:48:36] [Rank 0] Group 11 Loss: 5.4518
+[2025-09-05 20:48:36] [Rank 0] Group 12 Loss: 5.3819
+[2025-09-05 20:48:36] [Rank 0] Group 13 Loss: 5.4047
+[2025-09-05 20:48:36] [Rank 0] Group 14 Loss: 5.4283
+[2025-09-05 20:48:36] [Rank 0] Group 15 Loss: 5.4189
+[2025-09-05 20:48:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:48:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:48:36] [Rank 0] Group 2 FTA: 0.6200
+[2025-09-05 20:48:36] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:48:36] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:48:36] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 20:48:36] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:48:36] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 20:48:36] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 20:48:36] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 20:48:36] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 20:48:36] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 20:48:36] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 20:48:36] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 20:48:36] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 20:48:36] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 20:48:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:48:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:48:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:48:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:48:38] [Rank 0] step:6001/10000 train_time:258161ms step_avg:43.02ms
+[2025-09-05 20:48:39] [Rank 0] step:6021/10000 train_time:259481ms step_avg:43.10ms
+[2025-09-05 20:48:40] [Rank 0] step:6041/10000 train_time:260220ms step_avg:43.08ms
+[2025-09-05 20:48:41] [Rank 0] step:6061/10000 train_time:260958ms step_avg:43.06ms
+[2025-09-05 20:48:41] [Rank 0] step:6081/10000 train_time:261698ms step_avg:43.04ms
+[2025-09-05 20:48:42] [Rank 0] step:6101/10000 train_time:262436ms step_avg:43.02ms
+[2025-09-05 20:48:43] [Rank 0] step:6121/10000 train_time:263175ms step_avg:43.00ms
+[2025-09-05 20:48:44] [Rank 0] step:6141/10000 train_time:263914ms step_avg:42.98ms
+[2025-09-05 20:48:44] [Rank 0] step:6161/10000 train_time:264653ms step_avg:42.96ms
+[2025-09-05 20:48:45] [Rank 0] step:6181/10000 train_time:265392ms step_avg:42.94ms
+[2025-09-05 20:48:46] [Rank 0] step:6201/10000 train_time:266130ms step_avg:42.92ms
+[2025-09-05 20:48:47] [Rank 0] step:6221/10000 train_time:266869ms step_avg:42.90ms
+[2025-09-05 20:48:47] [Rank 0] step:6241/10000 train_time:267608ms step_avg:42.88ms
+[2025-09-05 20:48:48] [Rank 0] step:6261/10000 train_time:268347ms step_avg:42.86ms
+[2025-09-05 20:48:49] [Rank 0] step:6281/10000 train_time:269087ms step_avg:42.84ms
+[2025-09-05 20:48:50] [Rank 0] step:6301/10000 train_time:269826ms step_avg:42.82ms
+[2025-09-05 20:48:50] [Rank 0] step:6321/10000 train_time:270565ms step_avg:42.80ms
+[2025-09-05 20:48:51] [Rank 0] step:6341/10000 train_time:271305ms step_avg:42.79ms
+[2025-09-05 20:48:52] [Rank 0] step:6361/10000 train_time:272044ms step_avg:42.77ms
+[2025-09-05 20:48:52] [Rank 0] step:6381/10000 train_time:272783ms step_avg:42.75ms
+[2025-09-05 20:48:53] [Rank 0] step:6401/10000 train_time:273522ms step_avg:42.73ms
+[2025-09-05 20:48:54] [Rank 0] step:6421/10000 train_time:274261ms step_avg:42.71ms
+[2025-09-05 20:48:55] [Rank 0] step:6441/10000 train_time:274999ms step_avg:42.70ms
+[2025-09-05 20:48:55] [Rank 0] step:6461/10000 train_time:275738ms step_avg:42.68ms
+[2025-09-05 20:48:56] [Rank 0] step:6481/10000 train_time:276478ms step_avg:42.66ms
+[2025-09-05 20:48:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
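The two "Total FTA" lines differ only in aggregation: unweighted averages the 16 per-group accuracies directly, weighted pools them by per-group sample count. With the 1600-sample fixed-eval set split roughly evenly across 16 groups the two nearly coincide, as in the step-6000 readout (0.3163 vs 0.3162). A sketch under the assumption of exactly 100 samples per group (the true per-group counts are not printed in this log):

    group_fta = [1.00, 1.00, 0.62, 0.17, 0.25, 0.27, 0.29, 0.13,
                 0.22, 0.15, 0.21, 0.19, 0.16, 0.20, 0.11, 0.09]  # step-6000 per-group FTA
    group_n = [100] * 16                                          # assumed equal split of 1600 samples
    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * n for f, n in zip(group_fta, group_n)) / sum(group_n)
    # both come out ~0.31625 under this assumption; the log's 0.3163 / 0.3162
    # split suggests the actual group counts are only approximately equal
    print(unweighted, weighted)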
+[2025-09-05 20:48:57] [Rank 0] PRINT: step:6500/10000 train_loss:2.1246 val_loss:2.1052 train_time:277297ms step_avg:42.66ms
+[2025-09-05 20:48:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:48:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:50:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:50:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:50:19] [Rank 0] Total Loss: 4.6782
+[2025-09-05 20:50:19] [Rank 0] Total FTA (Unweighted): 0.3331
+[2025-09-05 20:50:19] [Rank 0] Total FTA (Weighted): 0.3331
+[2025-09-05 20:50:19] [Rank 0] Group 0 Loss: 3.3953
+[2025-09-05 20:50:19] [Rank 0] Group 1 Loss: 3.3318
+[2025-09-05 20:50:19] [Rank 0] Group 2 Loss: 3.3328
+[2025-09-05 20:50:19] [Rank 0] Group 3 Loss: 3.7379
+[2025-09-05 20:50:19] [Rank 0] Group 4 Loss: 4.0343
+[2025-09-05 20:50:19] [Rank 0] Group 5 Loss: 4.4416
+[2025-09-05 20:50:19] [Rank 0] Group 6 Loss: 4.7451
+[2025-09-05 20:50:19] [Rank 0] Group 7 Loss: 4.9042
+[2025-09-05 20:50:19] [Rank 0] Group 8 Loss: 5.2132
+[2025-09-05 20:50:19] [Rank 0] Group 9 Loss: 5.3492
+[2025-09-05 20:50:19] [Rank 0] Group 10 Loss: 5.4243
+[2025-09-05 20:50:19] [Rank 0] Group 11 Loss: 5.4404
+[2025-09-05 20:50:19] [Rank 0] Group 12 Loss: 5.3477
+[2025-09-05 20:50:19] [Rank 0] Group 13 Loss: 5.3779
+[2025-09-05 20:50:19] [Rank 0] Group 14 Loss: 5.4064
+[2025-09-05 20:50:19] [Rank 0] Group 15 Loss: 5.3689
+[2025-09-05 20:50:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:50:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:50:19] [Rank 0] Group 2 FTA: 0.8400
+[2025-09-05 20:50:19] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:50:19] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:50:19] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 20:50:19] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:50:19] [Rank 0] Group 7 FTA: 0.1400
+[2025-09-05 20:50:19] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 20:50:19] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 20:50:19] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 20:50:19] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 20:50:19] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 20:50:19] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 20:50:19] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 20:50:19] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 20:50:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:50:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:50:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:50:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:50:21] [Rank 0] step:6501/10000 train_time:277307ms step_avg:42.66ms
+[2025-09-05 20:50:21] [Rank 0] step:6521/10000 train_time:277982ms step_avg:42.63ms
+[2025-09-05 20:50:22] [Rank 0] step:6541/10000 train_time:278720ms step_avg:42.61ms
+[2025-09-05 20:50:23] [Rank 0] step:6561/10000 train_time:279460ms step_avg:42.59ms
+[2025-09-05 20:50:24] [Rank 0] step:6581/10000 train_time:280199ms step_avg:42.58ms
+[2025-09-05 20:50:24] [Rank 0] step:6601/10000 train_time:280939ms step_avg:42.56ms
+[2025-09-05 20:50:25] [Rank 0] step:6621/10000 train_time:281678ms step_avg:42.54ms
+[2025-09-05 20:50:26] [Rank 0] step:6641/10000 train_time:282416ms step_avg:42.53ms
+[2025-09-05 20:50:27] [Rank 0] step:6661/10000 train_time:283155ms step_avg:42.51ms
+[2025-09-05 20:50:27] [Rank 0] step:6681/10000 train_time:283895ms step_avg:42.49ms
+[2025-09-05 20:50:28] [Rank 0] step:6701/10000 train_time:284633ms step_avg:42.48ms
+[2025-09-05 20:50:29] [Rank 0] step:6721/10000 train_time:285372ms step_avg:42.46ms
+[2025-09-05 20:50:30] [Rank 0] step:6741/10000 train_time:286112ms step_avg:42.44ms
+[2025-09-05 20:50:30] [Rank 0] step:6761/10000 train_time:286851ms step_avg:42.43ms
+[2025-09-05 20:50:31] [Rank 0] step:6781/10000 train_time:287588ms step_avg:42.41ms
+[2025-09-05 20:50:32] [Rank 0] step:6801/10000 train_time:288328ms step_avg:42.39ms
+[2025-09-05 20:50:33] [Rank 0] step:6821/10000 train_time:289066ms step_avg:42.38ms
+[2025-09-05 20:50:33] [Rank 0] step:6841/10000 train_time:290002ms step_avg:42.39ms
+[2025-09-05 20:50:34] [Rank 0] step:6861/10000 train_time:290741ms step_avg:42.38ms
+[2025-09-05 20:50:35] [Rank 0] step:6881/10000 train_time:291480ms step_avg:42.36ms
+[2025-09-05 20:50:36] [Rank 0] step:6901/10000 train_time:292218ms step_avg:42.34ms
+[2025-09-05 20:50:36] [Rank 0] step:6921/10000 train_time:292957ms step_avg:42.33ms
+[2025-09-05 20:50:37] [Rank 0] step:6941/10000 train_time:293696ms step_avg:42.31ms
+[2025-09-05 20:50:38] [Rank 0] step:6961/10000 train_time:294435ms step_avg:42.30ms
+[2025-09-05 20:50:39] [Rank 0] step:6981/10000 train_time:295174ms step_avg:42.28ms
+[2025-09-05 20:50:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:50:40] [Rank 0] PRINT: step:7000/10000 train_loss:2.1024 val_loss:2.0849 train_time:295993ms step_avg:42.28ms
+[2025-09-05 20:50:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:50:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:52:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:52:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:52:01] [Rank 0] Total Loss: 4.6449
+[2025-09-05 20:52:01] [Rank 0] Total FTA (Unweighted): 0.3406
+[2025-09-05 20:52:01] [Rank 0] Total FTA (Weighted): 0.3406
+[2025-09-05 20:52:01] [Rank 0] Group 0 Loss: 3.4274
+[2025-09-05 20:52:01] [Rank 0] Group 1 Loss: 3.3489
+[2025-09-05 20:52:01] [Rank 0] Group 2 Loss: 3.3414
+[2025-09-05 20:52:01] [Rank 0] Group 3 Loss: 3.7179
+[2025-09-05 20:52:01] [Rank 0] Group 4 Loss: 4.0315
+[2025-09-05 20:52:01] [Rank 0] Group 5 Loss: 4.3700
+[2025-09-05 20:52:01] [Rank 0] Group 6 Loss: 4.6726
+[2025-09-05 20:52:01] [Rank 0] Group 7 Loss: 4.8544
+[2025-09-05 20:52:01] [Rank 0] Group 8 Loss: 5.1559
+[2025-09-05 20:52:01] [Rank 0] Group 9 Loss: 5.2792
+[2025-09-05 20:52:01] [Rank 0] Group 10 Loss: 5.3393
+[2025-09-05 20:52:01] [Rank 0] Group 11 Loss: 5.3746
+[2025-09-05 20:52:01] [Rank 0] Group 12 Loss: 5.3302
+[2025-09-05 20:52:01] [Rank 0] Group 13 Loss: 5.3501
+[2025-09-05 20:52:01] [Rank 0] Group 14 Loss: 5.3857
+[2025-09-05 20:52:01] [Rank 0] Group 15 Loss: 5.3396
+[2025-09-05 20:52:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:52:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:52:01] [Rank 0] Group 2 FTA: 0.8400
+[2025-09-05 20:52:01] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:52:01] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:52:01] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 20:52:01] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:52:01] [Rank 0] Group 7 FTA: 0.1400
+[2025-09-05 20:52:01] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 20:52:01] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 20:52:01] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 20:52:01] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 20:52:01] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 20:52:01] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 20:52:01] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 20:52:01] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:52:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:52:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:52:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:52:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:52:02] [Rank 0] step:7001/10000 train_time:296003ms step_avg:42.28ms
+[2025-09-05 20:52:03] [Rank 0] step:7021/10000 train_time:296667ms step_avg:42.25ms
+[2025-09-05 20:52:04] [Rank 0] step:7041/10000 train_time:297405ms step_avg:42.24ms
+[2025-09-05 20:52:04] [Rank 0] step:7061/10000 train_time:298148ms step_avg:42.22ms
+[2025-09-05 20:52:05] [Rank 0] step:7081/10000 train_time:298887ms step_avg:42.21ms
+[2025-09-05 20:52:06] [Rank 0] step:7101/10000 train_time:299627ms step_avg:42.19ms
+[2025-09-05 20:52:07] [Rank 0] step:7121/10000 train_time:300365ms step_avg:42.18ms
+[2025-09-05 20:52:07] [Rank 0] step:7141/10000 train_time:301104ms step_avg:42.17ms
+[2025-09-05 20:52:08] [Rank 0] step:7161/10000 train_time:301842ms step_avg:42.15ms
+[2025-09-05 20:52:09] [Rank 0] step:7181/10000 train_time:302581ms step_avg:42.14ms
+[2025-09-05 20:52:10] [Rank 0] step:7201/10000 train_time:303320ms step_avg:42.12ms
+[2025-09-05 20:52:10] [Rank 0] step:7221/10000 train_time:304059ms step_avg:42.11ms
+[2025-09-05 20:52:11] [Rank 0] step:7241/10000 train_time:304798ms step_avg:42.09ms
+[2025-09-05 20:52:12] [Rank 0] step:7261/10000 train_time:305539ms step_avg:42.08ms
+[2025-09-05 20:52:12] [Rank 0] step:7281/10000 train_time:306277ms step_avg:42.07ms
+[2025-09-05 20:52:13] [Rank 0] step:7301/10000 train_time:307015ms step_avg:42.05ms
+[2025-09-05 20:52:14] [Rank 0] step:7321/10000 train_time:307754ms step_avg:42.04ms
+[2025-09-05 20:52:15] [Rank 0] step:7341/10000 train_time:308492ms step_avg:42.02ms
+[2025-09-05 20:52:15] [Rank 0] step:7361/10000 train_time:309232ms step_avg:42.01ms
+[2025-09-05 20:52:16] [Rank 0] step:7381/10000 train_time:309972ms step_avg:42.00ms
+[2025-09-05 20:52:17] [Rank 0] step:7401/10000 train_time:310712ms step_avg:41.98ms
+[2025-09-05 20:52:18] [Rank 0] step:7421/10000 train_time:311451ms step_avg:41.97ms
+[2025-09-05 20:52:19] [Rank 0] step:7441/10000 train_time:312305ms step_avg:41.97ms
+[2025-09-05 20:52:19] [Rank 0] step:7461/10000 train_time:313068ms step_avg:41.96ms
+[2025-09-05 20:52:20] [Rank 0] step:7481/10000 train_time:313807ms step_avg:41.95ms
+[2025-09-05 20:52:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
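The step_avg field in these progress lines is simply cumulative training wall-clock divided by the step index. A one-line check against the step:7001 record above (variable names here are illustrative, not the script's):

    train_time_ms, step = 296003, 7001     # values from the step:7001 progress line
    step_avg = train_time_ms / step
    print(f"step_avg:{step_avg:.2f}ms")    # step_avg:42.28ms, matching the log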
+[2025-09-05 20:52:21] [Rank 0] PRINT: step:7500/10000 train_loss:2.0836 val_loss:2.0675 train_time:314627ms step_avg:41.95ms
+[2025-09-05 20:52:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:52:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:53:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:53:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:53:42] [Rank 0] Total Loss: 4.6481
+[2025-09-05 20:53:42] [Rank 0] Total FTA (Unweighted): 0.3500
+[2025-09-05 20:53:42] [Rank 0] Total FTA (Weighted): 0.3500
+[2025-09-05 20:53:42] [Rank 0] Group 0 Loss: 3.4475
+[2025-09-05 20:53:42] [Rank 0] Group 1 Loss: 3.3442
+[2025-09-05 20:53:42] [Rank 0] Group 2 Loss: 3.3306
+[2025-09-05 20:53:42] [Rank 0] Group 3 Loss: 3.7394
+[2025-09-05 20:53:42] [Rank 0] Group 4 Loss: 4.0364
+[2025-09-05 20:53:42] [Rank 0] Group 5 Loss: 4.3843
+[2025-09-05 20:53:42] [Rank 0] Group 6 Loss: 4.6754
+[2025-09-05 20:53:42] [Rank 0] Group 7 Loss: 4.8471
+[2025-09-05 20:53:42] [Rank 0] Group 8 Loss: 5.1615
+[2025-09-05 20:53:42] [Rank 0] Group 9 Loss: 5.2847
+[2025-09-05 20:53:42] [Rank 0] Group 10 Loss: 5.3631
+[2025-09-05 20:53:42] [Rank 0] Group 11 Loss: 5.3826
+[2025-09-05 20:53:42] [Rank 0] Group 12 Loss: 5.3246
+[2025-09-05 20:53:42] [Rank 0] Group 13 Loss: 5.3570
+[2025-09-05 20:53:42] [Rank 0] Group 14 Loss: 5.3534
+[2025-09-05 20:53:42] [Rank 0] Group 15 Loss: 5.3378
+[2025-09-05 20:53:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:53:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:53:42] [Rank 0] Group 2 FTA: 0.9200
+[2025-09-05 20:53:42] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 20:53:42] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:53:42] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 20:53:42] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:53:42] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 20:53:42] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 20:53:42] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 20:53:42] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 20:53:42] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 20:53:42] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 20:53:42] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 20:53:42] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 20:53:42] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 20:53:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png
+[2025-09-05 20:53:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png
+[2025-09-05 20:53:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png
+[2025-09-05 20:53:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png
+[2025-09-05 20:53:44] [Rank 0] step:7501/10000 train_time:314637ms step_avg:41.95ms
+[2025-09-05 20:53:45] [Rank 0] step:7521/10000 train_time:315321ms step_avg:41.93ms
+[2025-09-05 20:53:45] [Rank 0] step:7541/10000 train_time:316060ms step_avg:41.91ms
+[2025-09-05 20:53:46] [Rank 0] step:7561/10000 train_time:316797ms step_avg:41.90ms
+[2025-09-05 20:53:47] [Rank 0] step:7581/10000 train_time:317535ms step_avg:41.89ms
+[2025-09-05 20:53:48] [Rank 0] step:7601/10000 train_time:318273ms step_avg:41.87ms
+[2025-09-05 20:53:48] [Rank 0] step:7621/10000 train_time:319012ms step_avg:41.86ms
+[2025-09-05 20:53:50] [Rank 0] step:7641/10000 train_time:319977ms step_avg:41.88ms
+[2025-09-05 20:53:50] [Rank 0] step:7661/10000 train_time:321101ms step_avg:41.91ms
+[2025-09-05 20:53:51] [Rank 0] step:7681/10000 train_time:321839ms step_avg:41.90ms
+[2025-09-05 20:53:52] [Rank 0] step:7701/10000 train_time:322578ms step_avg:41.89ms
+[2025-09-05 20:53:53] [Rank 0] step:7721/10000 train_time:323317ms step_avg:41.87ms
+[2025-09-05 20:53:53] [Rank 0] step:7741/10000 train_time:324056ms step_avg:41.86ms
+[2025-09-05 20:53:54] [Rank 0] step:7761/10000 train_time:324794ms step_avg:41.85ms
+[2025-09-05 20:53:55] [Rank 0] step:7781/10000 train_time:325533ms step_avg:41.84ms
+[2025-09-05 20:53:56] [Rank 0] step:7801/10000 train_time:326271ms step_avg:41.82ms
+[2025-09-05 20:53:56] [Rank 0] step:7821/10000 train_time:327010ms step_avg:41.81ms
+[2025-09-05 20:53:57] [Rank 0] step:7841/10000 train_time:327749ms step_avg:41.80ms
+[2025-09-05 20:53:58] [Rank 0] step:7861/10000 train_time:328488ms step_avg:41.79ms
+[2025-09-05 20:53:59] [Rank 0] step:7881/10000 train_time:329227ms step_avg:41.77ms
+[2025-09-05 20:53:59] [Rank 0] step:7901/10000 train_time:329966ms step_avg:41.76ms
+[2025-09-05 20:54:00] [Rank 0] step:7921/10000 train_time:330705ms step_avg:41.75ms
+[2025-09-05 20:54:01] [Rank 0] step:7941/10000 train_time:331444ms step_avg:41.74ms
+[2025-09-05 20:54:02] [Rank 0] step:7961/10000 train_time:332184ms step_avg:41.73ms
+[2025-09-05 20:54:02] [Rank 0] step:7981/10000 train_time:332923ms step_avg:41.71ms
+[2025-09-05 20:54:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:54:03] [Rank 0] PRINT: step:8000/10000 train_loss:2.0686 val_loss:2.0524 train_time:333743ms step_avg:41.72ms +[2025-09-05 20:54:03] [Rank 0] PRINT: step:8000/10000 train_loss:2.0686 val_loss:2.0524 train_time:333743ms step_avg:41.72ms +[2025-09-05 20:54:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:54:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:54:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:54:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:55:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:55:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:55:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:55:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:55:25] [Rank 0] Total Loss: 4.6370 +[2025-09-05 20:55:25] [Rank 0] Total Loss: 4.6370 +[2025-09-05 20:55:25] [Rank 0] Total FTA (Unweighted): 0.3537 +[2025-09-05 20:55:25] [Rank 0] Total FTA (Unweighted): 0.3537 +[2025-09-05 20:55:25] [Rank 0] Total FTA (Weighted): 0.3538 +[2025-09-05 20:55:25] [Rank 0] Total FTA (Weighted): 0.3538 +[2025-09-05 20:55:25] [Rank 0] Group 0 Loss: 3.4570 +[2025-09-05 20:55:25] [Rank 0] Group 0 Loss: 3.4570 +[2025-09-05 20:55:25] [Rank 0] Group 1 Loss: 3.3468 +[2025-09-05 20:55:25] [Rank 0] Group 1 Loss: 3.3468 +[2025-09-05 20:55:25] [Rank 0] Group 2 Loss: 3.3358 +[2025-09-05 20:55:25] [Rank 0] Group 2 Loss: 3.3358 +[2025-09-05 20:55:25] [Rank 0] Group 3 Loss: 3.7478 +[2025-09-05 20:55:25] [Rank 0] Group 3 Loss: 3.7478 +[2025-09-05 20:55:25] [Rank 0] Group 4 Loss: 4.0269 +[2025-09-05 20:55:25] [Rank 0] Group 4 Loss: 4.0269 +[2025-09-05 20:55:25] [Rank 0] Group 5 Loss: 4.3759 +[2025-09-05 20:55:25] [Rank 0] Group 5 Loss: 4.3759 +[2025-09-05 20:55:25] [Rank 0] Group 6 Loss: 4.6400 +[2025-09-05 20:55:25] [Rank 0] Group 6 Loss: 4.6400 +[2025-09-05 20:55:25] [Rank 0] Group 7 Loss: 4.8313 +[2025-09-05 20:55:25] [Rank 0] Group 7 Loss: 4.8313 +[2025-09-05 20:55:25] [Rank 0] Group 8 Loss: 5.1417 +[2025-09-05 20:55:25] [Rank 0] Group 8 Loss: 5.1417 +[2025-09-05 20:55:25] [Rank 0] Group 9 Loss: 5.2659 +[2025-09-05 20:55:25] [Rank 0] Group 9 Loss: 5.2659 +[2025-09-05 20:55:25] [Rank 0] Group 10 Loss: 5.3371 +[2025-09-05 20:55:25] [Rank 0] Group 10 Loss: 5.3371 +[2025-09-05 20:55:25] [Rank 0] Group 11 Loss: 5.3594 +[2025-09-05 20:55:25] [Rank 0] Group 11 Loss: 5.3594 +[2025-09-05 20:55:25] [Rank 0] Group 12 Loss: 5.3082 +[2025-09-05 20:55:25] [Rank 0] Group 12 Loss: 5.3082 +[2025-09-05 20:55:25] [Rank 0] Group 13 Loss: 5.3418 +[2025-09-05 20:55:25] [Rank 0] Group 13 Loss: 5.3418 +[2025-09-05 20:55:25] [Rank 0] Group 14 Loss: 5.3556 +[2025-09-05 20:55:25] [Rank 0] Group 14 Loss: 5.3556 +[2025-09-05 20:55:25] [Rank 0] Group 15 Loss: 5.3205 +[2025-09-05 20:55:25] [Rank 0] Group 15 Loss: 5.3205 +[2025-09-05 20:55:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:55:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:55:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:55:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:55:25] [Rank 0] Group 2 FTA: 0.9200 +[2025-09-05 20:55:25] [Rank 0] Group 2 FTA: 0.9200 +[2025-09-05 20:55:25] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 20:55:25] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 20:55:25] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 20:55:25] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 20:55:25] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 20:55:25] [Rank 0] Group 5 FTA: 
0.3000 +[2025-09-05 20:55:25] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 20:55:25] [Rank 0] Group 7 FTA: 0.1700 +[2025-09-05 20:55:25] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 20:55:25] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 20:55:25] [Rank 0] Group 10 FTA: 0.2400 +[2025-09-05 20:55:25] [Rank 0] Group 11 FTA: 0.2100 +[2025-09-05 20:55:25] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-05 20:55:25] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-05 20:55:25] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 20:55:25] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 20:55:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png +[2025-09-05 20:55:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png +[2025-09-05 20:55:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png +[2025-09-05 20:55:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png +[2025-09-05 20:55:26] [Rank 0] step:8001/10000 train_time:333752ms step_avg:41.71ms +[2025-09-05 20:55:28] [Rank 0] step:8021/10000 train_time:335031ms step_avg:41.77ms +[2025-09-05 20:55:29] [Rank 0] step:8041/10000 train_time:335957ms step_avg:41.78ms +[2025-09-05 20:55:29] [Rank 0] step:8061/10000 train_time:336695ms step_avg:41.77ms +[2025-09-05 20:55:30] [Rank 0] step:8081/10000 train_time:337435ms step_avg:41.76ms +[2025-09-05 20:55:31] [Rank 0] step:8101/10000 train_time:338317ms step_avg:41.76ms +[2025-09-05 20:55:32] [Rank 0] step:8121/10000 train_time:339055ms step_avg:41.75ms +[2025-09-05 20:55:32] [Rank 0] step:8141/10000 train_time:339794ms step_avg:41.74ms +[2025-09-05 20:55:33] [Rank 0] step:8161/10000 train_time:340533ms step_avg:41.73ms +[2025-09-05 20:55:34] [Rank 0] step:8181/10000 train_time:341272ms step_avg:41.72ms +[2025-09-05 20:55:35] [Rank 0] step:8201/10000 train_time:342011ms step_avg:41.70ms +[2025-09-05 20:55:35] [Rank 0] step:8221/10000 train_time:342750ms step_avg:41.69ms +[2025-09-05 20:55:36] [Rank 0] step:8241/10000 train_time:343489ms step_avg:41.68ms +[2025-09-05 20:55:37] [Rank 0] step:8261/10000 train_time:344227ms step_avg:41.67ms +[2025-09-05 20:55:38] [Rank 0] step:8281/10000 train_time:344966ms step_avg:41.66ms +[2025-09-05 20:55:38] [Rank 0] step:8301/10000 train_time:345705ms step_avg:41.65ms +[2025-09-05 20:55:39] [Rank 0] step:8321/10000 train_time:346444ms step_avg:41.63ms +[2025-09-05 20:55:40] [Rank 0] step:8341/10000 train_time:347182ms step_avg:41.62ms +[2025-09-05 20:55:41] [Rank 0] step:8361/10000 train_time:347919ms step_avg:41.61ms +[2025-09-05 20:55:41] [Rank 0] step:8381/10000 train_time:348656ms step_avg:41.60ms +[2025-09-05 20:55:42] [Rank 0] step:8401/10000 train_time:349393ms step_avg:41.59ms +[2025-09-05 20:55:43] [Rank 0] step:8421/10000 train_time:350131ms step_avg:41.58ms +[2025-09-05 20:55:44] [Rank 0] step:8441/10000 train_time:350869ms step_avg:41.57ms +[2025-09-05 20:55:44] [Rank 0] step:8461/10000 train_time:351607ms step_avg:41.56ms +[2025-09-05 20:55:45] [Rank 0] step:8481/10000 train_time:352346ms step_avg:41.55ms +[2025-09-05 20:55:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
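
The divisibility warning above is plain integer arithmetic: with val_tokens = 491520 and a 65536-token validation batch, 491520 / 65536 = 7.5, so a loop that consumes only full batches runs 7 times and the trailing 32768 tokens are never scored. A minimal sketch of the check, with illustrative names (how the script derives its 65536-token validation batch is not visible in this excerpt):

    val_tokens = 491520
    val_batch_size = 65536  # assumed value, matching the warning above

    full_batches = val_tokens // val_batch_size            # 7
    missed = val_tokens - full_batches * val_batch_size    # 32768
    if val_tokens % val_batch_size != 0:
        print(f"Warning: {missed} of {val_tokens} val tokens are never evaluated")
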
+[2025-09-05 20:55:46] [Rank 0] PRINT: step:8500/10000 train_loss:2.0556 val_loss:2.0403 train_time:353165ms step_avg:41.55ms +[2025-09-05 20:55:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:55:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:57:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:57:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:57:08] [Rank 0] Total Loss: 4.6417 +[2025-09-05 20:57:08] [Rank 0] Total FTA (Unweighted): 0.3581 +[2025-09-05 20:57:08] [Rank 0] Total FTA (Weighted): 0.3581 +[2025-09-05 20:57:08] [Rank 0] Group 0 Loss: 3.4442 +[2025-09-05 20:57:08] [Rank 0] Group 1 Loss: 3.3519 +[2025-09-05 20:57:08] [Rank 0] Group 2 Loss: 3.3589 +[2025-09-05 20:57:08] [Rank 0] Group 3 Loss: 3.7515 +[2025-09-05 20:57:08] [Rank 0] Group 4 Loss: 4.0264 +[2025-09-05 20:57:08] [Rank 0] Group 5 Loss: 4.3837 +[2025-09-05 20:57:08] [Rank 0] Group 6 Loss: 4.6578 +[2025-09-05 20:57:08] [Rank 0] Group 7 Loss: 4.8367 +[2025-09-05 20:57:08] [Rank 0] Group 8 Loss: 5.1450 +[2025-09-05 20:57:08] [Rank 0] Group 9 Loss: 5.2613 +[2025-09-05 20:57:08] [Rank 0] Group 10 Loss: 5.3596 +[2025-09-05 20:57:08] [Rank 0] Group 11 Loss: 5.3662 +[2025-09-05 20:57:08] [Rank 0] Group 12 Loss: 5.3139 +[2025-09-05 20:57:08] [Rank 0] Group 13 Loss: 5.3356 +[2025-09-05 20:57:08] [Rank 0] Group 14 Loss: 5.3530 +[2025-09-05 20:57:08] [Rank 0] Group 15 Loss: 5.3206 +[2025-09-05 20:57:08] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:57:08] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:57:08] [Rank 0] Group 2 FTA: 0.9200 +[2025-09-05 20:57:08] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 20:57:08] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 20:57:08] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 20:57:08] [Rank 0] Group 6 FTA: 0.3100 +[2025-09-05 20:57:08] [Rank 0] Group 7 FTA: 0.1700 +[2025-09-05 20:57:08] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 20:57:08] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 20:57:08] [Rank 0] Group 10 FTA: 0.2400 +[2025-09-05 20:57:08] [Rank 0] Group 11 FTA: 0.2100 +[2025-09-05 20:57:08] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-05 20:57:08] [Rank 0] Group 13 FTA: 0.2400 +[2025-09-05 20:57:08] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 20:57:08] [Rank 0] Group 15 FTA: 0.1400 +[2025-09-05 20:57:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png +[2025-09-05 20:57:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png +[2025-09-05 20:57:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png +[2025-09-05 20:57:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png +[2025-09-05 20:57:10] [Rank 0] step:8501/10000 train_time:353175ms step_avg:41.55ms +[2025-09-05 20:57:11] [Rank 0] step:8521/10000 train_time:353843ms step_avg:41.53ms +[2025-09-05 20:57:11] [Rank 0] step:8541/10000 train_time:354581ms step_avg:41.52ms +[2025-09-05 20:57:12] [Rank 0] step:8561/10000 train_time:355320ms step_avg:41.50ms +[2025-09-05 20:57:13] [Rank 0] step:8581/10000 train_time:356059ms step_avg:41.49ms +[2025-09-05 20:57:14] [Rank 0] step:8601/10000 train_time:356798ms step_avg:41.48ms +[2025-09-05 20:57:14] [Rank 0] step:8621/10000 train_time:357536ms step_avg:41.47ms
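
Each detailed-evaluation block reports the total FTA twice. The mean of the 16 per-group FTA values printed above is exactly 0.3581, matching the "Unweighted" line, so "Unweighted" is presumably the mean of group means while "Weighted" pools over samples; with per_group_k = 100 samples in every group the two coincide, and they can drift apart by a final digit only through unequal effective group sizes or rounding (0.3662 vs 0.3663 at step 9500 further down). A sketch of both aggregations, assuming simple (correct, total) counters per group:

    def total_fta(group_counts):
        """group_counts: dict mapping group id -> (correct, total) counts."""
        per_group = [c / t for c, t in group_counts.values() if t > 0]
        unweighted = sum(per_group) / len(per_group)   # mean of group means
        correct = sum(c for c, _ in group_counts.values())
        total = sum(t for _, t in group_counts.values())
        weighted = correct / total                     # pooled over all samples
        return unweighted, weighted

    # equal group sizes make the two identical:
    assert total_fta({g: (25, 100) for g in range(16)}) == (0.25, 0.25)
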
+[2025-09-05 20:57:15] [Rank 0] step:8641/10000 train_time:358274ms step_avg:41.46ms +[2025-09-05 20:57:16] [Rank 0] step:8661/10000 train_time:359013ms step_avg:41.45ms +[2025-09-05 20:57:16] [Rank 0] step:8681/10000 train_time:359752ms step_avg:41.44ms +[2025-09-05 20:57:17] [Rank 0] step:8701/10000 train_time:360491ms step_avg:41.43ms +[2025-09-05 20:57:18] [Rank 0] step:8721/10000 train_time:361229ms step_avg:41.42ms +[2025-09-05 20:57:19] [Rank 0] step:8741/10000 train_time:361968ms step_avg:41.41ms +[2025-09-05 20:57:19] [Rank 0] step:8761/10000 train_time:362706ms step_avg:41.40ms +[2025-09-05 20:57:20] [Rank 0] step:8781/10000 train_time:363446ms step_avg:41.39ms +[2025-09-05 20:57:21] [Rank 0] step:8801/10000 train_time:364185ms step_avg:41.38ms +[2025-09-05 20:57:22] [Rank 0] step:8821/10000 train_time:364923ms step_avg:41.37ms +[2025-09-05 20:57:23] [Rank 0] step:8841/10000 train_time:366271ms step_avg:41.43ms +[2025-09-05 20:57:24] [Rank 0] step:8861/10000 train_time:367008ms step_avg:41.42ms +[2025-09-05 20:57:24] [Rank 0] step:8881/10000 train_time:367746ms step_avg:41.41ms +[2025-09-05 20:57:25] [Rank 0] step:8901/10000 train_time:368483ms step_avg:41.40ms +[2025-09-05 20:57:26] [Rank 0] step:8921/10000 train_time:369222ms step_avg:41.39ms +[2025-09-05 20:57:27] [Rank 0] step:8941/10000 train_time:369960ms step_avg:41.38ms +[2025-09-05 20:57:27] [Rank 0] step:8961/10000 train_time:370698ms step_avg:41.37ms +[2025-09-05 20:57:28] [Rank 0] step:8981/10000 train_time:371436ms step_avg:41.36ms +[2025-09-05 20:57:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
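
Two pieces of bookkeeping are visible in the step lines: step_avg is simply cumulative train_time divided by the step index (e.g. 341272 ms / 8181 steps = 41.72 ms), and train_time excludes evaluation wall clock, advancing only 10 ms across the roughly 80-second detailed eval at step 8500 (353165 ms at step 8500 vs 353175 ms at step 8501). A sketch of a timer with that behaviour, names illustrative rather than quoted from the script:

    import time

    train_time_ms = 0.0

    def run_timed_step(step, one_step):
        """Time only the training step; eval and plotting stay off the clock."""
        global train_time_ms
        t0 = time.perf_counter()
        one_step()  # forward/backward/optimizer for one step
        train_time_ms += (time.perf_counter() - t0) * 1000
        print(f"step:{step}/10000 train_time:{train_time_ms:.0f}ms "
              f"step_avg:{train_time_ms / step:.2f}ms")
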
+[2025-09-05 20:57:29] [Rank 0] PRINT: step:9000/10000 train_loss:2.0433 val_loss:2.0298 train_time:372254ms step_avg:41.36ms +[2025-09-05 20:57:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:57:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:58:51] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:58:51] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:58:51] [Rank 0] Total Loss: 4.6260 +[2025-09-05 20:58:51] [Rank 0] Total FTA (Unweighted): 0.3594 +[2025-09-05 20:58:51] [Rank 0] Total FTA (Weighted): 0.3594 +[2025-09-05 20:58:51] [Rank 0] Group 0 Loss: 3.4354 +[2025-09-05 20:58:51] [Rank 0] Group 1 Loss: 3.3618 +[2025-09-05 20:58:51] [Rank 0] Group 2 Loss: 3.3564 +[2025-09-05 20:58:51] [Rank 0] Group 3 Loss: 3.7585 +[2025-09-05 20:58:51] [Rank 0] Group 4 Loss: 4.0009 +[2025-09-05 20:58:51] [Rank 0] Group 5 Loss: 4.3558 +[2025-09-05 20:58:51] [Rank 0] Group 6 Loss: 4.6312 +[2025-09-05 20:58:51] [Rank 0] Group 7 Loss: 4.8214 +[2025-09-05 20:58:51] [Rank 0] Group 8 Loss: 5.1274 +[2025-09-05 20:58:51] [Rank 0] Group 9 Loss: 5.2447 +[2025-09-05 20:58:51] [Rank 0] Group 10 Loss: 5.3186 +[2025-09-05 20:58:51] [Rank 0] Group 11 Loss: 5.3483 +[2025-09-05 20:58:51] [Rank 0] Group 12 Loss: 5.2954 +[2025-09-05 20:58:51] [Rank 0] Group 13 Loss: 5.3220 +[2025-09-05 20:58:51] [Rank 0] Group 14 Loss: 5.3403 +[2025-09-05 20:58:51] [Rank 0] Group 15 Loss: 5.2980 +[2025-09-05 20:58:51] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:58:51] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:58:51] [Rank 0] Group 2 FTA: 0.9200 +[2025-09-05 20:58:51] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 20:58:51] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 20:58:51] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 20:58:51] [Rank 0] Group 6 FTA: 0.3100 +[2025-09-05 20:58:51] [Rank 0] Group 7 FTA: 0.2000 +[2025-09-05 20:58:51] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 20:58:51] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 20:58:51] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 20:58:51] [Rank 0] Group 11 FTA: 0.2100 +[2025-09-05 20:58:51] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 20:58:51] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 20:58:51] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 20:58:51] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 20:58:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png +[2025-09-05 20:58:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png +[2025-09-05 20:58:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png +[2025-09-05 20:58:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png +[2025-09-05 20:58:52] [Rank 0] step:9001/10000 train_time:372264ms step_avg:41.36ms +[2025-09-05 20:58:53] [Rank 0] step:9021/10000 train_time:372927ms step_avg:41.34ms +[2025-09-05 20:58:54] [Rank 0] step:9041/10000 train_time:373665ms step_avg:41.33ms +[2025-09-05 20:58:54] [Rank 0] step:9061/10000 train_time:374403ms step_avg:41.32ms +[2025-09-05 20:58:55] [Rank 0] step:9081/10000 train_time:375141ms step_avg:41.31ms +[2025-09-05 20:58:56] [Rank 0] step:9101/10000 train_time:375880ms step_avg:41.30ms +[2025-09-05 20:58:57] [Rank 0] step:9121/10000 train_time:376617ms step_avg:41.29ms
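
The four curve PNGs named in these lines appear later in this diff only as Git LFS pointers, so the patch itself carries no pixels; every point on those plots is nevertheless recoverable from the text records. A small parser for the per-group entries, assuming exactly the line format used by this log:

    import re
    from collections import defaultdict

    GROUP_RE = re.compile(r"\[Rank 0\] Group (\d+) (Loss|FTA): ([0-9.]+)")

    def parse_group_curves(log_text):
        """Return {"Loss"/"FTA": {group id: [values in log order]}}."""
        curves = defaultdict(lambda: defaultdict(list))
        for group, metric, value in GROUP_RE.findall(log_text):
            curves[metric][int(group)].append(float(value))
        return curves

    # curves = parse_group_curves(open("training_log.txt").read())
    # curves["FTA"][15] -> e.g. [..., 0.1300, 0.1400, 0.1300, 0.1300, 0.1600]
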
+[2025-09-05 20:58:57] [Rank 0] step:9141/10000 train_time:377355ms step_avg:41.28ms +[2025-09-05 20:58:58] [Rank 0] step:9161/10000 train_time:378094ms step_avg:41.27ms +[2025-09-05 20:58:59] [Rank 0] step:9181/10000 train_time:378832ms step_avg:41.26ms +[2025-09-05 20:59:00] [Rank 0] step:9201/10000 train_time:379570ms step_avg:41.25ms +[2025-09-05 20:59:00] [Rank 0] step:9221/10000 train_time:380309ms step_avg:41.24ms +[2025-09-05 20:59:01] [Rank 0] step:9241/10000 train_time:381048ms step_avg:41.23ms +[2025-09-05 20:59:02] [Rank 0] step:9261/10000 train_time:381787ms step_avg:41.23ms +[2025-09-05 20:59:03] [Rank 0] step:9281/10000 train_time:382524ms step_avg:41.22ms +[2025-09-05 20:59:03] [Rank 0] step:9301/10000 train_time:383261ms step_avg:41.21ms +[2025-09-05 20:59:04] [Rank 0] step:9321/10000 train_time:383999ms step_avg:41.20ms +[2025-09-05 20:59:05] [Rank 0] step:9341/10000 train_time:384736ms step_avg:41.19ms +[2025-09-05 20:59:05] [Rank 0] step:9361/10000 train_time:385474ms step_avg:41.18ms +[2025-09-05 20:59:06] [Rank 0] step:9381/10000 train_time:386213ms step_avg:41.17ms +[2025-09-05 20:59:07] [Rank 0] step:9401/10000 train_time:386951ms step_avg:41.16ms +[2025-09-05 20:59:08] [Rank 0] step:9421/10000 train_time:387689ms step_avg:41.15ms +[2025-09-05 20:59:08] [Rank 0] step:9441/10000 train_time:388427ms step_avg:41.14ms +[2025-09-05 20:59:09] [Rank 0] step:9461/10000 train_time:389166ms step_avg:41.13ms +[2025-09-05 20:59:10] [Rank 0] step:9481/10000 train_time:389904ms step_avg:41.12ms +[2025-09-05 20:59:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
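
For context on the step numbers: the configs in this commit set sgd_lr = 0.1, num_iterations = 10000 and cooldown_frac = 0.8. Assuming a modded-nanogpt-style trapezoidal schedule (a constant multiplier followed by a linear ramp-down over the final cooldown fraction; the exact shape is defined in a part of the script not shown in this excerpt), these late steps run at a small fraction of the base learning rate:

    def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
        """Assumed trapezoidal shape: flat at 1.0, then linear decay to 0."""
        x = step / num_iterations
        if x < 1 - cooldown_frac:
            return 1.0
        return (1 - x) / cooldown_frac   # 1.0 at the knee, 0.0 at the last step

    # e.g. effective SGD lr at step 9000: 0.1 * lr_multiplier(9000) = 0.0125
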
+[2025-09-05 20:59:11] [Rank 0] PRINT: step:9500/10000 train_loss:2.0328 val_loss:2.0212 train_time:390724ms step_avg:41.13ms +[2025-09-05 20:59:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:59:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 21:00:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 21:00:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 21:00:32] [Rank 0] Total Loss: 4.6281 +[2025-09-05 21:00:32] [Rank 0] Total FTA (Unweighted): 0.3662 +[2025-09-05 21:00:32] [Rank 0] Total FTA (Weighted): 0.3663 +[2025-09-05 21:00:32] [Rank 0] Group 0 Loss: 3.4196 +[2025-09-05 21:00:32] [Rank 0] Group 1 Loss: 3.3496 +[2025-09-05 21:00:32] [Rank 0] Group 2 Loss: 3.3400 +[2025-09-05 21:00:32] [Rank 0] Group 3 Loss: 3.7518 +[2025-09-05 21:00:32] [Rank 0] Group 4 Loss: 4.0099 +[2025-09-05 21:00:32] [Rank 0] Group 5 Loss: 4.3781 +[2025-09-05 21:00:32] [Rank 0] Group 6 Loss: 4.6363 +[2025-09-05 21:00:33] [Rank 0] Group 7 Loss: 4.8210 +[2025-09-05 21:00:33] [Rank 0] Group 8 Loss: 5.1258 +[2025-09-05 21:00:33] [Rank 0] Group 9 Loss: 5.2551 +[2025-09-05 21:00:33] [Rank 0] Group 10 Loss: 5.3472 +[2025-09-05 21:00:33] [Rank 0] Group 11 Loss: 5.3577 +[2025-09-05 21:00:33] [Rank 0] Group 12 Loss: 5.2934 +[2025-09-05 21:00:33] [Rank 0] Group 13 Loss: 5.3173 +[2025-09-05 21:00:33] [Rank 0] Group 14 Loss: 5.3410 +[2025-09-05 21:00:33] [Rank 0] Group 15 Loss: 5.3055 +[2025-09-05 21:00:33] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 21:00:33] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 21:00:33] [Rank 0] Group 2 FTA: 0.9200 +[2025-09-05 21:00:33] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 21:00:33] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 21:00:33] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 21:00:33] [Rank 0] Group 6 FTA: 0.3100 +[2025-09-05 21:00:33] [Rank 0] Group 7 FTA: 0.2100 +[2025-09-05 21:00:33] [Rank 0] Group 8 FTA: 0.2400 +[2025-09-05 21:00:33] [Rank 0] Group 9 FTA: 0.1900 +[2025-09-05 21:00:33] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 21:00:33] [Rank 0] Group 11 FTA: 0.2300 +[2025-09-05 21:00:33] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 21:00:33] [Rank 0] Group 13 FTA: 0.2900 +[2025-09-05 21:00:33] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 21:00:33] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 21:00:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png +[2025-09-05 21:00:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png +[2025-09-05 21:00:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png +[2025-09-05 21:00:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png +[2025-09-05 21:00:34] [Rank 0] step:9501/10000 train_time:390733ms step_avg:41.13ms +[2025-09-05 21:00:35] [Rank 0] step:9521/10000 train_time:391408ms step_avg:41.11ms +[2025-09-05 21:00:35] [Rank 0] step:9541/10000 train_time:392147ms step_avg:41.10ms +[2025-09-05 21:00:36] [Rank 0] step:9561/10000 train_time:392886ms step_avg:41.09ms +[2025-09-05 21:00:37] [Rank 0] step:9581/10000 train_time:393624ms step_avg:41.08ms +[2025-09-05 21:00:38] [Rank 0] step:9601/10000 train_time:394363ms step_avg:41.08ms +[2025-09-05 21:00:38] [Rank 0] step:9621/10000 train_time:395101ms step_avg:41.07ms
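
"Fixed-eval set loaded with 1600 samples" is consistent with the fixed_eval_indices.json files included in this diff: per_group_k = 100 dataset indices for each of 16 groups keyed "0" through "15", giving 16 x 100 = 1600 samples that stay constant across evaluation steps. A sketch of loading and sanity-checking such a file, assuming each key maps to a flat list of sample indices:

    import json

    def load_fixed_eval(path):
        with open(path) as f:
            raw = json.load(f)               # {"0": [...], ..., "15": [...]}
        indices = {int(k): v for k, v in raw.items()}
        assert sorted(indices) == list(range(16))
        assert all(len(v) == 100 for v in indices.values())  # per_group_k
        return indices                       # 16 groups x 100 = 1600 samples
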
+[2025-09-05 21:00:39] [Rank 0] step:9641/10000 train_time:395839ms step_avg:41.06ms +[2025-09-05 21:00:40] [Rank 0] step:9661/10000 train_time:396851ms step_avg:41.08ms +[2025-09-05 21:00:41] [Rank 0] step:9681/10000 train_time:397589ms step_avg:41.07ms +[2025-09-05 21:00:42] [Rank 0] step:9701/10000 train_time:398328ms step_avg:41.06ms +[2025-09-05 21:00:42] [Rank 0] step:9721/10000 train_time:399067ms step_avg:41.05ms +[2025-09-05 21:00:43] [Rank 0] step:9741/10000 train_time:399807ms step_avg:41.04ms +[2025-09-05 21:00:44] [Rank 0] step:9761/10000 train_time:400545ms step_avg:41.04ms +[2025-09-05 21:00:45] [Rank 0] step:9781/10000 train_time:401389ms step_avg:41.04ms +[2025-09-05 21:00:45] [Rank 0] step:9801/10000 train_time:402128ms step_avg:41.03ms +[2025-09-05 21:00:46] [Rank 0] step:9821/10000 train_time:402868ms step_avg:41.02ms +[2025-09-05 21:00:47] [Rank 0] step:9841/10000 train_time:403745ms step_avg:41.03ms +[2025-09-05 21:00:48] [Rank 0] step:9861/10000 train_time:404485ms step_avg:41.02ms +[2025-09-05 21:00:49] [Rank 0] step:9881/10000 train_time:405225ms step_avg:41.01ms +[2025-09-05 21:00:49] [Rank 0] step:9901/10000 train_time:405963ms step_avg:41.00ms +[2025-09-05 21:00:50] [Rank 0] step:9921/10000 train_time:406700ms step_avg:40.99ms +[2025-09-05 21:00:51] [Rank 0] step:9941/10000 train_time:407439ms step_avg:40.99ms +[2025-09-05 21:00:52] [Rank 0] step:9961/10000 train_time:408177ms step_avg:40.98ms +[2025-09-05 21:00:52] [Rank 0] step:9981/10000 train_time:408915ms step_avg:40.97ms +[2025-09-05 21:00:53] [Rank 0] step:10000/10000 train_time:409615ms step_avg:40.96ms +[2025-09-05 21:00:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
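
One thing the timestamps make clear: a detailed evaluation takes roughly 81 seconds of wall clock (20:59:11 to 21:00:32 around step 9500), while the 500 training steps between evaluations take about 20.5 seconds at ~41 ms/step. With val_loss_every = 500, the run therefore spends most of its wall clock inside evaluation, which the train_time counter deliberately ignores. A quick estimate from numbers read off this log:

    step_ms = 41.0   # approximate step_avg reported above
    eval_s = 81.0    # detailed-eval duration observed between step lines
    every = 500      # val_loss_every from the config

    train_s = every * step_ms / 1000         # ~20.5 s of training per interval
    share = eval_s / (eval_s + train_s)      # ~0.80
    print(f"~{share:.0%} of wall clock goes to detailed evaluation")
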
+[2025-09-05 21:00:53] [Rank 0] PRINT: step:10000/10000 train_loss:2.0251 val_loss:2.0138 train_time:409739ms step_avg:40.97ms +[2025-09-05 21:00:53] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 21:00:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 21:02:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 21:02:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 21:02:15] [Rank 0] Total Loss: 4.6146 +[2025-09-05 21:02:15] [Rank 0] Total FTA (Unweighted): 0.3688 +[2025-09-05 21:02:15] [Rank 0] Total FTA (Weighted): 0.3688 +[2025-09-05 21:02:15] [Rank 0] Group 0 Loss: 3.4232 +[2025-09-05 21:02:15] [Rank 0] Group 1 Loss: 3.3639 +[2025-09-05 21:02:15] [Rank 0] Group 2 Loss: 3.3464 +[2025-09-05 21:02:15] [Rank 0] Group 3 Loss: 3.7283 +[2025-09-05 21:02:15] [Rank 0] Group 4 Loss: 3.9964 +[2025-09-05 21:02:15] [Rank 0] Group 5 Loss: 4.3518 +[2025-09-05 21:02:15] [Rank 0] Group 6 Loss: 4.6175 +[2025-09-05 21:02:15] [Rank 0] Group 7 Loss: 4.8060 +[2025-09-05 21:02:15] [Rank 0] Group 8 Loss: 5.1062 +[2025-09-05 21:02:15] [Rank 0] Group 9 Loss: 5.2352 +[2025-09-05 21:02:15] [Rank 0] Group 10 Loss: 5.3258 +[2025-09-05 21:02:15] [Rank 0] Group 11 Loss: 5.3391 +[2025-09-05 21:02:15] [Rank 0] Group 12 Loss: 5.2743 +[2025-09-05 21:02:15] [Rank 0] Group 13 Loss: 5.3047 +[2025-09-05 21:02:15] [Rank 0] Group 14 Loss: 5.3238 +[2025-09-05 21:02:15] [Rank 0] Group 15 Loss: 5.2902 +[2025-09-05 21:02:15] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 21:02:15] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 21:02:15] [Rank 0] Group 2 FTA: 0.9200 +[2025-09-05 21:02:15] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 21:02:15] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 21:02:15] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 21:02:15] [Rank 0] Group 6 FTA: 0.3100 +[2025-09-05 21:02:15] [Rank 0] Group 7 FTA: 0.2200 +[2025-09-05 21:02:15] [Rank 0] Group 8 FTA: 0.2500 +[2025-09-05 21:02:15] [Rank 0] Group 9 FTA: 0.1900 +[2025-09-05 21:02:15] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 21:02:15] [Rank 0] Group 11 FTA: 0.2100 +[2025-09-05 21:02:15] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 21:02:15] [Rank 0] Group 13 FTA: 0.2900 +[2025-09-05 21:02:15] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 21:02:15] [Rank 0] Group 15 FTA: 0.1600 +[2025-09-05 21:02:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_loss_curves.png +[2025-09-05 21:02:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/per_class_acc_curves.png +[2025-09-05 21:02:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_loss_curve.png +[2025-09-05 21:02:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_42/total_acc_curve.png +[2025-09-05 21:02:16] [Rank 0] step:10001/10000 train_time:409748ms step_avg:40.97ms +[2025-09-05 21:02:16] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 21:02:16 2025 --- +[2025-09-05 21:02:16] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f8e80bf22a42c163190d87ac4d69bb4901d288c6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 
43, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.1, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "cd99c1fa-addc-444b-9804-213711e90001", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..77bcdf572fd53c9785c07fd197a40f9685ff4e68 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22c5a451692bba5e73e49c182e7ceef7edf45f6d6770d78947318f5cceee1a31 +size 322862 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..57ddcbbf68305690b2166e0da6f74c9ccdd65648 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a37d0180f818c94a34e73f3ee2af837caa9d40937d8edf78894018a2f2feb3f +size 397788 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..64aeefa974283558fca09a31615258ad97aaa92c --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6aa438fd551634a1a9961112c897709ef82d50e3781a0fdbedb560b29e69e577 +size 89932 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..f115dc2b869f9f6ef1b2d07351a91001988a464b --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f18ff749a0b6bfbcad17aacefcd4df2e3ee81326dcef4fb39a994d3af5210c58 +size 115282 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/training_log_cd99c1fa-addc-444b-9804-213711e90001.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/training_log_cd99c1fa-addc-444b-9804-213711e90001.txt new file mode 100644 index 0000000000000000000000000000000000000000..c2b5e9fcca926078f602449eaecd68fd7037e2b4 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/training_log_cd99c1fa-addc-444b-9804-213711e90001.txt @@ -0,0 +1,5614 @@ +[2025-09-05 21:02:40] [Rank 0] PRINT: --- Script Start: Fri Sep 5 21:02:40 2025 --- +[2025-09-05 21:02:40] [Rank 0] PRINT: --- Script Start: Fri Sep 5 21:02:40 2025 --- +[2025-09-05 21:02:40] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 21:02:40] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 21:02:40] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 21:02:40] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 21:02:40] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-05 21:02:40] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-05 21:02:40] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43 +[2025-09-05 21:02:40] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43 +[2025-09-05 21:02:40] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: SGD+momentum on all parameters (uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(V Attn, QK Attn); "
+                         "13: Muon(W_O Attn, W_2 MLP)/Adam(QK Attn, V Attn, W_1 MLP); "
+                         "14: Muon(W_O Attn)/Adam(QK Attn, V Attn, MLP); "
+                         "15: Muon(W_V Attn)/Adam(QK Attn, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+                    )
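+# Example invocation matching this run's logged config (a sketch: the script
+# name and process count are placeholders, and the actual launcher may differ):
+#   torchrun --nproc_per_node=8 <this_script>.py --optimizer_mode 9 \
+#       --model_parameterization gated --sgd_lr 0.1 --seed 43 \
+#       --base_dir logs_qa_sgd_gated/lr_search_long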
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once; a duplicated write
+        # here is what doubles every line in the log.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
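+# Layout of the QA dataset assumed throughout (it follows directly from
+# generate_powerlaw_selection_counts below): for m = 15, group 0 holds a single
+# class with 2**15 samples, and each group g >= 1 holds 2**(g-1) classes with
+# 2**(15-g) samples per class. Every group g >= 1 therefore contributes the
+# same total of 2**14 samples while its classes get geometrically rarer, so the
+# group index doubles as a frequency bucket for the per-group metrics.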
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
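+    # One forward pass per sample feeds both metrics below: masked cross-entropy
+    # over the padded sequence gives the per-group loss, and the argmax at the
+    # last prompt position gives first-token accuracy (FTA).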
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
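+    # The fine-grained modes (10, 13-16) split optimizers at the single-matrix
+    # level: W_O (attn.c_proj) and W_2 (mlp.c_proj) are output-side projections,
+    # while QKV and W_1 (mlp.c_fc) are input-side. Each matrix should land in
+    # exactly one of muon_params_target_list / adam_matrix_target_list; a quick
+    # disjointness check one could add after this chain, e.g.:
+    #     assert not ({id(p) for p in muon_params_target_list} &
+    #                 {id(p) for p in adam_matrix_target_list})
+    # Embeddings, lm_head and scalar parameters always stay with Adam (see the
+    # group setup below).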
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
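+    # Same mode table as the "qkvo" branch above; the one difference in the
+    # gated parameterization is that mlp_w1_group = mlp_fc_params + mlp_up_params,
+    # so wherever W_1 is routed (modes 6, 8, 13) both c_fc and c_up move with it,
+    # and all_mlp_matrices holds three matrices per block instead of two.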
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; read the LR from exp_args, since this branch never defines a local muon_lr
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 21:02:40] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle over the shards so training can run for multiple epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+# -----------------------------------------------------------------------------
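# [Editor's sketch, not part of the logged run] Minimal writer for a shard that
# _load_data_shard above accepts: a 256-entry int32 header whose first three
# fields are the magic number 20240520, the version 1, and the token count,
# followed by the tokens themselves as uint16. The file name is illustrative.
import numpy as np

def write_data_shard(path, tokens):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520   # magic number the loader asserts on
    header[1] = 1          # format version the loader asserts on
    header[2] = len(tokens)
    with open(path, "wb") as f:
        f.write(header.tobytes())  # 256 * 4 bytes, matching the loader's f.seek(256 * 4)
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())

# write_data_shard("train_000000.bin", [50256, 11, 198])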
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
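# [Editor's note] Small numeric check of the sequence prep used in the loop
# above: with BLOCK_SIZE = 128 and the 4096-token cap, a 200-token QA item is
# padded to 256 positions; targets are the inputs shifted left by one, and
# positions beyond the real text are labelled -100 so that
# F.cross_entropy(..., ignore_index=-100) excludes them from the loss.
original_len = 200
BLOCK_SIZE = 128
padded_len = min(((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE, 4096)
assert padded_len == 256 and padded_len // BLOCK_SIZE == 2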
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
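# [Editor's note] Why run_detailed_evaluation reports two accuracy totals:
# with one group of 100 samples at 90 correct and another of 10 samples at
# 5 correct,
#   total_acc_weighted   = (90 + 5) / (100 + 10) ~= 0.864  (big groups dominate)
#   total_acc_unweighted = (0.90 + 0.50) / 2      = 0.700  (each group counts equally)
# The training loop stores the unweighted variant in history['total_acc'].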
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
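# [Editor's sketch, not part of the logged run] A sanity check that could be
# run after either optimizer-setup branch above: every trainable parameter
# should belong to exactly one optimizer.
def check_param_partition(model, optimizers):
    ids_per_opt = [{id(p) for g in opt.param_groups for p in g["params"]}
                   for opt in optimizers]
    for i in range(len(ids_per_opt)):
        for j in range(i + 1, len(ids_per_opt)):
            assert not (ids_per_opt[i] & ids_per_opt[j]), "parameter assigned to two optimizers"
    covered = set().union(*ids_per_opt)
    missing = [n for n, p in model.named_parameters() if id(p) not in covered]
    assert not missing, f"parameters not assigned to any optimizer: {missing}"

+
+    print0(f"PRINT: Optimizers configured. 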
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
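# [Editor's note] What get_lr above yields for this run's settings
# (num_iterations = 10000, cooldown_frac = 0.8): a multiplier of 1.0 until
# step 2000 (x = 0.2 = 1 - cooldown_frac), then a linear decay to 0.1:
#   step     0 -> 1.00
#   step  2000 -> 1.00
#   step  6000 -> w = 0.5 -> 0.5 * 1.0 + 0.5 * 0.1 = 0.55
#   step 10000 -> 0.10
# Each group's lr is group["initial_lr"] times this multiplier, applied in
# the training loop below.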
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
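# [Editor's sketch, not part of the logged run] Shape of the
# fixed_eval_indices.json written above: a map from group id (as a string) to
# at most PER_GROUP_K line offsets into the QA jsonl. A quick offline
# inspection (path relative to the run directory) might look like:
import json
with open("fixed_eval_indices.json") as f:
    fixed = json.load(f)
for gid in sorted(fixed, key=int):
    print(f"group {gid}: {len(fixed[gid])} fixed eval samples")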
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
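# [Editor's note] With this run's hyperparameters the validation division is
# exact on one GPU: val_tokens = 491520 and val_seq_len = 4*4*1024 = 16384,
# so val_batch_size = 16384 * world_size and, for world_size = 1,
# val_num_steps = 491520 // 16384 = 30 full batches with no remainder.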
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 21:02:40] [Rank 0] PRINT: Constructing model... +[2025-09-05 21:02:40] [Rank 0] PRINT: Constructing model... +[2025-09-05 21:02:41] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-05 21:02:41] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-05 21:02:41] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-05 21:02:41] [Rank 0] PRINT: Model constructed and broadcasted. 
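The loop above depends on two helpers defined earlier in the script and not shown in this excerpt: get_lr(step), the scalar multiplier applied to every param group's initial_lr, and plot_curves(...), which renders the history dictionaries to PNGs in the run directory. A minimal sketch of both, assuming a constant-then-linear-cooldown schedule driven by args.cooldown_frac and args.num_iterations, and a matplotlib-based plotter matching the call sites above; these are hypothetical reconstructions, not the script's actual definitions:

    import matplotlib
    matplotlib.use("Agg")  # headless rendering, since plots go straight to files
    import matplotlib.pyplot as plt

    def get_lr(step: int) -> float:
        # Assumed shape: flat at 1.0, then linear decay to 0 over the final
        # cooldown_frac fraction of training (hypothetical reconstruction).
        x = step / args.num_iterations  # progress in [0, 1]
        if x < 1 - args.cooldown_frac:
            return 1.0
        return max((1 - x) / args.cooldown_frac, 0.0)

    def plot_curves(series, out_path, title, ylabel, y_lim=None):
        # series is either {step: value} (total curves) or
        # {group_id: {step: value}} (per-class curves), as built in `history`.
        plt.figure(figsize=(8, 5))
        nested = bool(series) and isinstance(next(iter(series.values())), dict)
        if nested:
            for gid in sorted(series, key=int):
                pts = sorted((int(s), v) for s, v in series[gid].items())
                plt.plot([s for s, _ in pts], [v for _, v in pts], label=f"group {gid}")
            plt.legend(fontsize=7, ncol=2)
        else:
            pts = sorted((int(s), v) for s, v in series.items())
            plt.plot([s for s, _ in pts], [v for _, v in pts])
        plt.title(title)
        plt.xlabel("step")
        plt.ylabel(ylabel)
        if y_lim is not None:
            plt.ylim(y_lim)
        plt.savefig(out_path, dpi=150, bbox_inches="tight")
        plt.close()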
+[2025-09-05 21:02:40] [Rank 0] PRINT: Constructing model...
+[2025-09-05 21:02:41] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 21:02:41] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 21:02:41] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 21:02:45] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 21:02:45] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 21:02:45] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 21:02:45] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 21:02:46] [Rank 0] PRINT: Model returns:
+[2025-09-05 21:02:46] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 21:02:46] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 21:02:46] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.1).
+[2025-09-05 21:02:46] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 21:02:46] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 21:02:50] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 21:02:50] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 21:03:29] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 21:03:29] [Rank 0] PRINT: Starting training...
+[2025-09-05 21:03:36] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/fixed_eval_indices.json
+[2025-09-05 21:03:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:03:39] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 21:04:14] [Rank 0] step:21/10000 train_time:34343ms step_avg:1635.38ms
+[2025-09-05 21:04:14] [Rank 0] step:41/10000 train_time:35071ms step_avg:855.40ms
+[2025-09-05 21:04:15] [Rank 0] step:61/10000 train_time:35799ms step_avg:586.87ms
+[2025-09-05 21:04:16] [Rank 0] step:81/10000 train_time:36526ms step_avg:450.94ms
+[2025-09-05 21:04:17] [Rank 0] step:101/10000 train_time:37253ms step_avg:368.85ms
+[2025-09-05 21:04:17] [Rank 0] step:121/10000 train_time:37980ms step_avg:313.89ms
+[2025-09-05 21:04:18] [Rank 0] step:141/10000 train_time:38708ms step_avg:274.52ms
+[2025-09-05 21:04:19] [Rank 0] step:161/10000 train_time:39435ms step_avg:244.94ms
+[2025-09-05 21:04:20] [Rank 0] step:181/10000 train_time:40162ms step_avg:221.89ms
+[2025-09-05 21:04:20] [Rank 0] step:201/10000 train_time:40889ms step_avg:203.43ms
+[2025-09-05 21:04:21] [Rank 0] step:221/10000 train_time:41616ms step_avg:188.31ms
+[2025-09-05 21:04:22] [Rank 0] step:241/10000 train_time:42342ms step_avg:175.69ms
+[2025-09-05 21:04:22] [Rank 0] step:261/10000 train_time:43067ms step_avg:165.01ms
+[2025-09-05 21:04:23] [Rank 0] step:281/10000 train_time:43793ms step_avg:155.85ms
+[2025-09-05 21:04:24] [Rank 0] step:301/10000 train_time:44518ms step_avg:147.90ms
+[2025-09-05 21:04:25] [Rank 0] step:321/10000 train_time:45244ms step_avg:140.95ms
+[2025-09-05 21:04:25] [Rank 0] step:341/10000 train_time:45968ms step_avg:134.80ms
+[2025-09-05 21:04:26] [Rank 0] step:361/10000 train_time:46695ms step_avg:129.35ms
+[2025-09-05 21:04:27] [Rank 0] step:381/10000 train_time:47420ms step_avg:124.46ms
+[2025-09-05 21:04:28] [Rank 0] step:401/10000 train_time:48147ms step_avg:120.07ms
+[2025-09-05 21:04:28] [Rank 0] step:421/10000 train_time:48873ms step_avg:116.09ms
+[2025-09-05 21:04:29] [Rank 0] step:441/10000 train_time:49598ms step_avg:112.47ms
+[2025-09-05 21:04:30] [Rank 0] step:461/10000 train_time:50325ms step_avg:109.17ms
+[2025-09-05 21:04:30] [Rank 0] step:481/10000 train_time:51053ms step_avg:106.14ms
+[2025-09-05 21:04:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:04:32] [Rank 0] PRINT: step:500/10000 train_loss:5.6536 val_loss:4.0792 train_time:51858ms step_avg:103.72ms
+[2025-09-05 21:04:32] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:04:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:05:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:05:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:05:52] [Rank 0] Total Loss: 5.8307
+[2025-09-05 21:05:52] [Rank 0] Total FTA (Unweighted): 0.0819
+[2025-09-05 21:05:52] [Rank 0] Total FTA (Weighted): 0.0819
+[2025-09-05 21:05:52] [Rank 0] Group 0 Loss: 3.5022
+[2025-09-05 21:05:52] [Rank 0] Group 1 Loss: 3.5745
+[2025-09-05 21:05:52] [Rank 0] Group 2 Loss: 4.4122
+[2025-09-05 21:05:52] [Rank 0] Group 3 Loss: 5.2522
+[2025-09-05 21:05:52] [Rank 0] Group 4 Loss: 6.0283
+[2025-09-05 21:05:52] [Rank 0] Group 5 Loss: 6.1763
+[2025-09-05 21:05:52] [Rank 0] Group 6 Loss: 6.2783
+[2025-09-05 21:05:52] [Rank 0] Group 7 Loss: 6.2582
+[2025-09-05 21:05:52] [Rank 0] Group 8 Loss: 6.4073
+[2025-09-05 21:05:52] [Rank 0] Group 9 Loss: 6.5610
+[2025-09-05 21:05:52] [Rank 0] Group 10 Loss: 6.5154
+[2025-09-05 21:05:52] [Rank 0] Group 11 Loss: 6.5815
+[2025-09-05 21:05:52] [Rank 0] Group 12 Loss: 6.4157
+[2025-09-05 21:05:52] [Rank 0] Group 13 Loss: 6.3927
+[2025-09-05 21:05:52] [Rank 0] Group 14 Loss: 6.5215
+[2025-09-05 21:05:52] [Rank 0] Group 15 Loss: 6.4134
+[2025-09-05 21:05:52] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 21:05:52] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 21:05:52] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-05 21:05:52] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-05 21:05:52] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-05 21:05:52] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-05 21:05:52] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 21:05:52] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-05 21:05:52] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-05 21:05:52] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-05 21:05:52] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-05 21:05:52] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 21:05:52] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 21:05:52] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 21:05:52] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:05:52] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 21:05:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:05:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:05:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:05:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:05:54] [Rank 0] step:501/10000 train_time:51867ms step_avg:103.53ms
+[2025-09-05 21:05:55] [Rank 0] step:521/10000 train_time:52539ms step_avg:100.84ms
+[2025-09-05 21:05:55] [Rank 0] step:541/10000 train_time:53265ms step_avg:98.46ms
+[2025-09-05 21:05:56] [Rank 0] step:561/10000 train_time:53992ms step_avg:96.24ms
+[2025-09-05 21:05:57] [Rank 0] step:581/10000 train_time:54718ms step_avg:94.18ms
+[2025-09-05 21:05:58] [Rank 0] step:601/10000 train_time:55444ms step_avg:92.25ms
+[2025-09-05 21:05:58] [Rank 0] step:621/10000 train_time:56170ms step_avg:90.45ms
+[2025-09-05 21:05:59] [Rank 0] step:641/10000 train_time:56896ms step_avg:88.76ms
+[2025-09-05 21:06:00] [Rank 0] step:661/10000 train_time:57625ms step_avg:87.18ms
+[2025-09-05 21:06:01] [Rank 0] step:681/10000 train_time:58476ms step_avg:85.87ms
+[2025-09-05 21:06:01] [Rank 0] step:701/10000 train_time:59203ms step_avg:84.45ms
+[2025-09-05 21:06:02] [Rank 0] step:721/10000 train_time:59930ms step_avg:83.12ms
+[2025-09-05 21:06:03] [Rank 0] step:741/10000 train_time:60807ms step_avg:82.06ms
+[2025-09-05 21:06:04] [Rank 0] step:761/10000 train_time:61538ms step_avg:80.86ms
+[2025-09-05 21:06:04] [Rank 0] step:781/10000 train_time:62269ms step_avg:79.73ms
+[2025-09-05 21:06:05] [Rank 0] step:801/10000 train_time:63001ms step_avg:78.65ms
+[2025-09-05 21:06:07] [Rank 0] step:821/10000 train_time:64340ms step_avg:78.37ms
+[2025-09-05 21:06:07] [Rank 0] step:841/10000 train_time:65071ms step_avg:77.37ms
+[2025-09-05 21:06:08] [Rank 0] step:861/10000 train_time:65803ms step_avg:76.43ms
+[2025-09-05 21:06:09] [Rank 0] step:881/10000 train_time:66535ms step_avg:75.52ms
+[2025-09-05 21:06:09] [Rank 0] step:901/10000 train_time:67266ms step_avg:74.66ms
+[2025-09-05 21:06:10] [Rank 0] step:921/10000 train_time:67998ms step_avg:73.83ms
+[2025-09-05 21:06:11] [Rank 0] step:941/10000 train_time:68730ms step_avg:73.04ms
+[2025-09-05 21:06:12] [Rank 0] step:961/10000 train_time:69461ms step_avg:72.28ms
+[2025-09-05 21:06:12] [Rank 0] step:981/10000 train_time:70193ms step_avg:71.55ms
+[2025-09-05 21:06:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:06:14] [Rank 0] PRINT: step:1000/10000 train_loss:3.6621 val_loss:3.3540 train_time:71005ms step_avg:71.01ms
+[2025-09-05 21:06:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:06:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:07:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:07:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:07:34] [Rank 0] Total Loss: 5.3292
+[2025-09-05 21:07:34] [Rank 0] Total FTA (Unweighted): 0.1269
+[2025-09-05 21:07:34] [Rank 0] Total FTA (Weighted): 0.1269
+[2025-09-05 21:07:34] [Rank 0] Group 0 Loss: 3.2878
+[2025-09-05 21:07:34] [Rank 0] Group 1 Loss: 3.1577
+[2025-09-05 21:07:34] [Rank 0] Group 2 Loss: 3.5730
+[2025-09-05 21:07:34] [Rank 0] Group 3 Loss: 4.2981
+[2025-09-05 21:07:34] [Rank 0] Group 4 Loss: 5.1986
+[2025-09-05 21:07:34] [Rank 0] Group 5 Loss: 5.5520
+[2025-09-05 21:07:34] [Rank 0] Group 6 Loss: 5.7755
+[2025-09-05 21:07:34] [Rank 0] Group 7 Loss: 5.7989
+[2025-09-05 21:07:34] [Rank 0] Group 8 Loss: 6.0100
+[2025-09-05 21:07:34] [Rank 0] Group 9 Loss: 6.1459
+[2025-09-05 21:07:34] [Rank 0] Group 10 Loss: 6.0969
+[2025-09-05 21:07:34] [Rank 0] Group 11 Loss: 6.1740
+[2025-09-05 21:07:34] [Rank 0] Group 12 Loss: 6.0290
+[2025-09-05 21:07:34] [Rank 0] Group 13 Loss: 6.0332
+[2025-09-05 21:07:34] [Rank 0] Group 14 Loss: 6.1153
+[2025-09-05 21:07:34] [Rank 0] Group 15 Loss: 6.0223
+[2025-09-05 21:07:34] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 21:07:34] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 21:07:34] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:07:34] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 21:07:34] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 21:07:34] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-05 21:07:34] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 21:07:34] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 21:07:34] [Rank 0] Group 8 FTA: 0.1300
+[2025-09-05 21:07:34] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 21:07:34] [Rank 0] Group 10 FTA: 0.0900
+[2025-09-05 21:07:34] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 21:07:34] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 21:07:34] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 21:07:34] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:07:34] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 21:07:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:07:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:07:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:07:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:07:36] [Rank 0] step:1001/10000 train_time:71014ms step_avg:70.94ms
+[2025-09-05 21:07:36] [Rank 0] step:1021/10000 train_time:71684ms step_avg:70.21ms
+[2025-09-05 21:07:37] [Rank 0] step:1041/10000 train_time:72416ms step_avg:69.56ms
+[2025-09-05 21:07:38] [Rank 0] step:1061/10000 train_time:73147ms step_avg:68.94ms
+[2025-09-05 21:07:39] [Rank 0] step:1081/10000 train_time:73878ms step_avg:68.34ms
+[2025-09-05 21:07:39] [Rank 0] step:1101/10000 train_time:74609ms step_avg:67.77ms
+[2025-09-05 21:07:40] [Rank 0] step:1121/10000 train_time:75341ms step_avg:67.21ms
+[2025-09-05 21:07:41] [Rank 0] step:1141/10000 train_time:76073ms step_avg:66.67ms
+[2025-09-05 21:07:42] [Rank 0] step:1161/10000 train_time:76805ms step_avg:66.15ms
+[2025-09-05 21:07:42] [Rank 0] step:1181/10000 train_time:77537ms step_avg:65.65ms
+[2025-09-05 21:07:43] [Rank 0] step:1201/10000 train_time:78269ms step_avg:65.17ms
+[2025-09-05 21:07:44] [Rank 0] step:1221/10000 train_time:79000ms step_avg:64.70ms
+[2025-09-05 21:07:45] [Rank 0] step:1241/10000 train_time:79733ms step_avg:64.25ms
+[2025-09-05 21:07:45] [Rank 0] step:1261/10000 train_time:80465ms step_avg:63.81ms
+[2025-09-05 21:07:46] [Rank 0] step:1281/10000 train_time:81196ms step_avg:63.38ms
+[2025-09-05 21:07:47] [Rank 0] step:1301/10000 train_time:81928ms step_avg:62.97ms
+[2025-09-05 21:07:47] [Rank 0] step:1321/10000 train_time:82659ms step_avg:62.57ms
+[2025-09-05 21:07:48] [Rank 0] step:1341/10000 train_time:83391ms step_avg:62.19ms
+[2025-09-05 21:07:49] [Rank 0] step:1361/10000 train_time:84123ms step_avg:61.81ms
+[2025-09-05 21:07:50] [Rank 0] step:1381/10000 train_time:84852ms step_avg:61.44ms
+[2025-09-05 21:07:50] [Rank 0] step:1401/10000 train_time:85583ms step_avg:61.09ms
+[2025-09-05 21:07:51] [Rank 0] step:1421/10000 train_time:86314ms step_avg:60.74ms
+[2025-09-05 21:07:52] [Rank 0] step:1441/10000 train_time:87046ms step_avg:60.41ms
+[2025-09-05 21:07:53] [Rank 0] step:1461/10000 train_time:87776ms step_avg:60.08ms
+[2025-09-05 21:07:53] [Rank 0] step:1481/10000 train_time:88508ms step_avg:59.76ms
+[2025-09-05 21:07:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:07:54] [Rank 0] PRINT: step:1500/10000 train_loss:3.1665 val_loss:2.9912 train_time:89319ms step_avg:59.55ms
+[2025-09-05 21:07:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:07:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:09:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:09:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:09:15] [Rank 0] Total Loss: 5.1678
+[2025-09-05 21:09:15] [Rank 0] Total FTA (Unweighted): 0.1494
+[2025-09-05 21:09:15] [Rank 0] Total FTA (Weighted): 0.1494
+[2025-09-05 21:09:15] [Rank 0] Group 0 Loss: 3.2327
+[2025-09-05 21:09:15] [Rank 0] Group 1 Loss: 3.2347
+[2025-09-05 21:09:15] [Rank 0] Group 2 Loss: 3.4685
+[2025-09-05 21:09:15] [Rank 0] Group 3 Loss: 3.9619
+[2025-09-05 21:09:15] [Rank 0] Group 4 Loss: 4.7762
+[2025-09-05 21:09:15] [Rank 0] Group 5 Loss: 5.2431
+[2025-09-05 21:09:15] [Rank 0] Group 6 Loss: 5.5559
+[2025-09-05 21:09:15] [Rank 0] Group 7 Loss: 5.6100
+[2025-09-05 21:09:15] [Rank 0] Group 8 Loss: 5.8318
+[2025-09-05 21:09:15] [Rank 0] Group 9 Loss: 5.9904
+[2025-09-05 21:09:15] [Rank 0] Group 10 Loss: 5.9702
+[2025-09-05 21:09:15] [Rank 0] Group 11 Loss: 6.0358
+[2025-09-05 21:09:15] [Rank 0] Group 12 Loss: 5.9245
+[2025-09-05 21:09:15] [Rank 0] Group 13 Loss: 5.9296
+[2025-09-05 21:09:15] [Rank 0] Group 14 Loss: 5.9975
+[2025-09-05 21:09:15] [Rank 0] Group 15 Loss: 5.9225
+[2025-09-05 21:09:15] [Rank 0] Group 0 FTA: 0.6200
+[2025-09-05 21:09:15] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 21:09:15] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:09:15] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 21:09:15] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 21:09:15] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 21:09:15] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-05 21:09:15] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 21:09:15] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-05 21:09:15] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 21:09:15] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 21:09:15] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 21:09:15] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 21:09:15] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 21:09:15] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 21:09:15] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 21:09:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:09:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:09:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:09:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:09:16] [Rank 0] step:1501/10000 train_time:89328ms step_avg:59.51ms
+[2025-09-05 21:09:17] [Rank 0] step:1521/10000 train_time:89999ms step_avg:59.17ms
+[2025-09-05 21:09:18] [Rank 0] step:1541/10000 train_time:90731ms step_avg:58.88ms
+[2025-09-05 21:09:19] [Rank 0] step:1561/10000 train_time:91463ms step_avg:58.59ms
+[2025-09-05 21:09:19] [Rank 0] step:1581/10000 train_time:92194ms step_avg:58.31ms
+[2025-09-05 21:09:20] [Rank 0] step:1601/10000 train_time:92924ms step_avg:58.04ms
+[2025-09-05 21:09:21] [Rank 0] step:1621/10000 train_time:93656ms step_avg:57.78ms
+[2025-09-05 21:09:22] [Rank 0] step:1641/10000 train_time:94994ms step_avg:57.89ms
+[2025-09-05 21:09:23] [Rank 0] step:1661/10000 train_time:95725ms step_avg:57.63ms
+[2025-09-05 21:09:24] [Rank 0] step:1681/10000 train_time:96455ms step_avg:57.38ms
+[2025-09-05 21:09:24] [Rank 0] step:1701/10000 train_time:97185ms step_avg:57.13ms
+[2025-09-05 21:09:25] [Rank 0] step:1721/10000 train_time:97915ms step_avg:56.89ms
+[2025-09-05 21:09:26] [Rank 0] step:1741/10000 train_time:98645ms step_avg:56.66ms
+[2025-09-05 21:09:26] [Rank 0] step:1761/10000 train_time:99376ms step_avg:56.43ms
+[2025-09-05 21:09:27] [Rank 0] step:1781/10000 train_time:100108ms step_avg:56.21ms
+[2025-09-05 21:09:28] [Rank 0] step:1801/10000 train_time:100839ms step_avg:55.99ms
+[2025-09-05 21:09:29] [Rank 0] step:1821/10000 train_time:101570ms step_avg:55.78ms
+[2025-09-05 21:09:29] [Rank 0] step:1841/10000 train_time:102302ms step_avg:55.57ms
+[2025-09-05 21:09:30] [Rank 0] step:1861/10000 train_time:103034ms step_avg:55.36ms
+[2025-09-05 21:09:31] [Rank 0] step:1881/10000 train_time:103766ms step_avg:55.17ms
+[2025-09-05 21:09:32] [Rank 0] step:1901/10000 train_time:104498ms step_avg:54.97ms
+[2025-09-05 21:09:32] [Rank 0] step:1921/10000 train_time:105229ms step_avg:54.78ms
+[2025-09-05 21:09:33] [Rank 0] step:1941/10000 train_time:105960ms step_avg:54.59ms
+[2025-09-05 21:09:34] [Rank 0] step:1961/10000 train_time:106692ms step_avg:54.41ms
+[2025-09-05 21:09:35] [Rank 0] step:1981/10000 train_time:107424ms step_avg:54.23ms
+[2025-09-05 21:09:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:09:36] [Rank 0] PRINT: step:2000/10000 train_loss:2.8811 val_loss:2.7605 train_time:108236ms step_avg:54.12ms
+[2025-09-05 21:09:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:09:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:10:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:10:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:10:56] [Rank 0] Total Loss: 4.9372
+[2025-09-05 21:10:56] [Rank 0] Total FTA (Unweighted): 0.1800
+[2025-09-05 21:10:56] [Rank 0] Total FTA (Weighted): 0.1800
+[2025-09-05 21:10:56] [Rank 0] Group 0 Loss: 3.1697
+[2025-09-05 21:10:57] [Rank 0] Group 1 Loss: 3.0141
+[2025-09-05 21:10:57] [Rank 0] Group 2 Loss: 3.3190
+[2025-09-05 21:10:57] [Rank 0] Group 3 Loss: 3.7286
+[2025-09-05 21:10:57] [Rank 0] Group 4 Loss: 4.4519
+[2025-09-05 21:10:57] [Rank 0] Group 5 Loss: 4.9344
+[2025-09-05 21:10:57] [Rank 0] Group 6 Loss: 5.2362
+[2025-09-05 21:10:57] [Rank 0] Group 7 Loss: 5.3388
+[2025-09-05 21:10:57] [Rank 0] Group 8 Loss: 5.6057
+[2025-09-05 21:10:57] [Rank 0] Group 9 Loss: 5.7509
+[2025-09-05 21:10:57] [Rank 0] Group 10 Loss: 5.7450
+[2025-09-05 21:10:57] [Rank 0] Group 11 Loss: 5.7998
+[2025-09-05 21:10:57] [Rank 0] Group 12 Loss: 5.7116
+[2025-09-05 21:10:57] [Rank 0] Group 13 Loss: 5.7119
+[2025-09-05 21:10:57] [Rank 0] Group 14 Loss: 5.7849
+[2025-09-05 21:10:57] [Rank 0] Group 15 Loss: 5.6930
+[2025-09-05 21:10:57] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-05 21:10:57] [Rank 0] Group 1 FTA: 0.3400
+[2025-09-05 21:10:57] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:10:57] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:10:57] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 21:10:57] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 21:10:57] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 21:10:57] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 21:10:57] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-05 21:10:57] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 21:10:57] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 21:10:57] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 21:10:57] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 21:10:57] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 21:10:57] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:10:57] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 21:10:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:10:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:10:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:10:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:10:58] [Rank 0] step:2001/10000 train_time:108245ms step_avg:54.10ms
+[2025-09-05 21:10:59] [Rank 0] step:2021/10000 train_time:108915ms step_avg:53.89ms
+[2025-09-05 21:11:00] [Rank 0] step:2041/10000 train_time:109647ms step_avg:53.72ms
+[2025-09-05 21:11:00] [Rank 0] step:2061/10000 train_time:110379ms step_avg:53.56ms
+[2025-09-05 21:11:01] [Rank 0] step:2081/10000 train_time:111110ms step_avg:53.39ms
+[2025-09-05 21:11:02] [Rank 0] step:2101/10000 train_time:111842ms step_avg:53.23ms
+[2025-09-05 21:11:02] [Rank 0] step:2121/10000 train_time:112573ms step_avg:53.08ms
+[2025-09-05 21:11:03] [Rank 0] step:2141/10000 train_time:113305ms step_avg:52.92ms
+[2025-09-05 21:11:04] [Rank 0] step:2161/10000 train_time:114037ms step_avg:52.77ms
+[2025-09-05 21:11:05] [Rank 0] step:2181/10000 train_time:114768ms step_avg:52.62ms
+[2025-09-05 21:11:05] [Rank 0] step:2201/10000 train_time:115499ms step_avg:52.48ms
+[2025-09-05 21:11:06] [Rank 0] step:2221/10000 train_time:116231ms step_avg:52.33ms
+[2025-09-05 21:11:07] [Rank 0] step:2241/10000 train_time:116968ms step_avg:52.19ms
+[2025-09-05 21:11:08] [Rank 0] step:2261/10000 train_time:117706ms step_avg:52.06ms
+[2025-09-05 21:11:08] [Rank 0] step:2281/10000 train_time:118444ms step_avg:51.93ms
+[2025-09-05 21:11:09] [Rank 0] step:2301/10000 train_time:119183ms step_avg:51.80ms
+[2025-09-05 21:11:10] [Rank 0] step:2321/10000 train_time:119920ms step_avg:51.67ms
+[2025-09-05 21:11:11] [Rank 0] step:2341/10000 train_time:120656ms step_avg:51.54ms
+[2025-09-05 21:11:11] [Rank 0] step:2361/10000 train_time:121394ms step_avg:51.42ms
+[2025-09-05 21:11:12] [Rank 0] step:2381/10000 train_time:122132ms step_avg:51.29ms
+[2025-09-05 21:11:13] [Rank 0] step:2401/10000 train_time:122869ms step_avg:51.17ms
+[2025-09-05 21:11:13] [Rank 0] step:2421/10000 train_time:123606ms step_avg:51.06ms
+[2025-09-05 21:11:14] [Rank 0] step:2441/10000 train_time:124344ms step_avg:50.94ms
+[2025-09-05 21:11:15] [Rank 0] step:2461/10000 train_time:125081ms step_avg:50.83ms
+[2025-09-05 21:11:16] [Rank 0] step:2481/10000 train_time:125818ms step_avg:50.71ms
+[2025-09-05 21:11:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:11:17] [Rank 0] PRINT: step:2500/10000 train_loss:2.6812 val_loss:2.5915 train_time:126780ms step_avg:50.71ms
+[2025-09-05 21:11:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:11:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:12:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:12:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:12:38] [Rank 0] Total Loss: 4.8737
+[2025-09-05 21:12:38] [Rank 0] Total FTA (Unweighted): 0.2094
+[2025-09-05 21:12:38] [Rank 0] Total FTA (Weighted): 0.2094
+[2025-09-05 21:12:38] [Rank 0] Group 0 Loss: 3.2227
+[2025-09-05 21:12:38] [Rank 0] Group 1 Loss: 3.1030
+[2025-09-05 21:12:38] [Rank 0] Group 2 Loss: 3.3362
+[2025-09-05 21:12:38] [Rank 0] Group 3 Loss: 3.7243
+[2025-09-05 21:12:38] [Rank 0] Group 4 Loss: 4.3222
+[2025-09-05 21:12:38] [Rank 0] Group 5 Loss: 4.7978
+[2025-09-05 21:12:38] [Rank 0] Group 6 Loss: 5.1117
+[2025-09-05 21:12:38] [Rank 0] Group 7 Loss: 5.2306
+[2025-09-05 21:12:38] [Rank 0] Group 8 Loss: 5.4936
+[2025-09-05 21:12:38] [Rank 0] Group 9 Loss: 5.6503
+[2025-09-05 21:12:38] [Rank 0] Group 10 Loss: 5.6465
+[2025-09-05 21:12:38] [Rank 0] Group 11 Loss: 5.7045
+[2025-09-05 21:12:38] [Rank 0] Group 12 Loss: 5.6373
+[2025-09-05 21:12:38] [Rank 0] Group 13 Loss: 5.6447
+[2025-09-05 21:12:38] [Rank 0] Group 14 Loss: 5.7067
+[2025-09-05 21:12:38] [Rank 0] Group 15 Loss: 5.6464
+[2025-09-05 21:12:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:12:38] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-05 21:12:38] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:12:38] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:12:38] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-05 21:12:38] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 21:12:38] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 21:12:38] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-05 21:12:38] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-05 21:12:38] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 21:12:38] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 21:12:38] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 21:12:38] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 21:12:38] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 21:12:38] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:12:38] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 21:12:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:12:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:12:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:12:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:12:39] [Rank 0] step:2501/10000 train_time:126789ms step_avg:50.70ms
+[2025-09-05 21:12:40] [Rank 0] step:2521/10000 train_time:127460ms step_avg:50.56ms
+[2025-09-05 21:12:41] [Rank 0] step:2541/10000 train_time:128198ms step_avg:50.45ms
+[2025-09-05 21:12:42] [Rank 0] step:2561/10000 train_time:128935ms step_avg:50.35ms
+[2025-09-05 21:12:42] [Rank 0] step:2581/10000 train_time:129674ms step_avg:50.24ms
+[2025-09-05 21:12:43] [Rank 0] step:2601/10000 train_time:130413ms step_avg:50.14ms
+[2025-09-05 21:12:44] [Rank 0] step:2621/10000 train_time:131151ms step_avg:50.04ms
+[2025-09-05 21:12:45] [Rank 0] step:2641/10000 train_time:131889ms step_avg:49.94ms
+[2025-09-05 21:12:45] [Rank 0] step:2661/10000 train_time:132627ms step_avg:49.84ms
+[2025-09-05 21:12:46] [Rank 0] step:2681/10000 train_time:133364ms step_avg:49.74ms
+[2025-09-05 21:12:47] [Rank 0] step:2701/10000 train_time:134102ms step_avg:49.65ms
+[2025-09-05 21:12:48] [Rank 0] step:2721/10000 train_time:134840ms step_avg:49.56ms
+[2025-09-05 21:12:48] [Rank 0] step:2741/10000 train_time:135578ms step_avg:49.46ms
+[2025-09-05 21:12:49] [Rank 0] step:2761/10000 train_time:136315ms step_avg:49.37ms
+[2025-09-05 21:12:50] [Rank 0] step:2781/10000 train_time:137053ms step_avg:49.28ms
+[2025-09-05 21:12:51] [Rank 0] step:2801/10000 train_time:137790ms step_avg:49.19ms
+[2025-09-05 21:12:52] [Rank 0] step:2821/10000 train_time:139147ms step_avg:49.33ms
+[2025-09-05 21:12:53] [Rank 0] step:2841/10000 train_time:139885ms step_avg:49.24ms
+[2025-09-05 21:12:53] [Rank 0] step:2861/10000 train_time:140623ms step_avg:49.15ms
+[2025-09-05 21:12:54] [Rank 0] step:2881/10000 train_time:141362ms step_avg:49.07ms
+[2025-09-05 21:12:55] [Rank 0] step:2901/10000 train_time:142100ms step_avg:48.98ms
+[2025-09-05 21:12:56] [Rank 0] step:2921/10000 train_time:142839ms step_avg:48.90ms
+[2025-09-05 21:12:56] [Rank 0] step:2941/10000 train_time:143576ms step_avg:48.82ms
+[2025-09-05 21:12:57] [Rank 0] step:2961/10000 train_time:144313ms step_avg:48.74ms
+[2025-09-05 21:12:58] [Rank 0] step:2981/10000 train_time:145050ms step_avg:48.66ms
+[2025-09-05 21:12:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:12:59] [Rank 0] PRINT: step:3000/10000 train_loss:2.5335 val_loss:2.4641 train_time:145868ms step_avg:48.62ms
+[2025-09-05 21:12:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:12:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:14:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:14:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:14:20] [Rank 0] Total Loss: 4.7680
+[2025-09-05 21:14:20] [Rank 0] Total FTA (Unweighted): 0.2594
+[2025-09-05 21:14:20] [Rank 0] Total FTA (Weighted): 0.2594
+[2025-09-05 21:14:20] [Rank 0] Group 0 Loss: 3.2105
+[2025-09-05 21:14:20] [Rank 0] Group 1 Loss: 3.0697
+[2025-09-05 21:14:20] [Rank 0] Group 2 Loss: 3.2619
+[2025-09-05 21:14:20] [Rank 0] Group 3 Loss: 3.6671
+[2025-09-05 21:14:20] [Rank 0] Group 4 Loss: 4.1565
+[2025-09-05 21:14:20] [Rank 0] Group 5 Loss: 4.6458
+[2025-09-05 21:14:20] [Rank 0] Group 6 Loss: 4.9773
+[2025-09-05 21:14:20] [Rank 0] Group 7 Loss: 5.1087
+[2025-09-05 21:14:20] [Rank 0] Group 8 Loss: 5.3814
+[2025-09-05 21:14:20] [Rank 0] Group 9 Loss: 5.5206
+[2025-09-05 21:14:20] [Rank 0] Group 10 Loss: 5.5158
+[2025-09-05 21:14:20] [Rank 0] Group 11 Loss: 5.5901
+[2025-09-05 21:14:20] [Rank 0] Group 12 Loss: 5.5262
+[2025-09-05 21:14:20] [Rank 0] Group 13 Loss: 5.5390
+[2025-09-05 21:14:20] [Rank 0] Group 14 Loss: 5.5868
+[2025-09-05 21:14:20] [Rank 0] Group 15 Loss: 5.5308
+[2025-09-05 21:14:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:14:20] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:14:20] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 21:14:20] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:14:20] [Rank 0] Group 4 FTA: 0.1700
+[2025-09-05 21:14:20] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 21:14:20] [Rank 0] Group 6 FTA: 0.1700
+[2025-09-05 21:14:20] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 21:14:20] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 21:14:20] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 21:14:20] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-05 21:14:20] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 21:14:20] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 21:14:20] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 21:14:20] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:14:20] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 21:14:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:14:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:14:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:14:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:14:21] [Rank 0] step:3001/10000 train_time:145877ms step_avg:48.61ms
+[2025-09-05 21:14:22] [Rank 0] step:3021/10000 train_time:146547ms step_avg:48.51ms
+[2025-09-05 21:14:23] [Rank 0] step:3041/10000 train_time:147286ms step_avg:48.43ms
+[2025-09-05 21:14:23] [Rank 0] step:3061/10000 train_time:148024ms step_avg:48.36ms
+[2025-09-05 21:14:24] [Rank 0] step:3081/10000 train_time:148761ms step_avg:48.28ms
+[2025-09-05 21:14:25] [Rank 0] step:3101/10000 train_time:149499ms step_avg:48.21ms
+[2025-09-05 21:14:26] [Rank 0] step:3121/10000 train_time:150237ms step_avg:48.14ms
+[2025-09-05 21:14:27] [Rank 0] step:3141/10000 train_time:151114ms step_avg:48.11ms
+[2025-09-05 21:14:27] [Rank 0] step:3161/10000 train_time:151852ms step_avg:48.04ms
+[2025-09-05 21:14:28] [Rank 0] step:3181/10000 train_time:152590ms step_avg:47.97ms
+[2025-09-05 21:14:29] [Rank 0] step:3201/10000 train_time:153329ms step_avg:47.90ms
+[2025-09-05 21:14:30] [Rank 0] step:3221/10000 train_time:154206ms step_avg:47.88ms
+[2025-09-05 21:14:30] [Rank 0] step:3241/10000 train_time:154944ms step_avg:47.81ms
+[2025-09-05 21:14:31] [Rank 0] step:3261/10000 train_time:155681ms step_avg:47.74ms
+[2025-09-05 21:14:32] [Rank 0] step:3281/10000 train_time:156419ms step_avg:47.67ms
+[2025-09-05 21:14:33] [Rank 0] step:3301/10000 train_time:157156ms step_avg:47.61ms
+[2025-09-05 21:14:33] [Rank 0] step:3321/10000 train_time:157895ms step_avg:47.54ms
+[2025-09-05 21:14:34] [Rank 0] step:3341/10000 train_time:158633ms step_avg:47.48ms
+[2025-09-05 21:14:35] [Rank 0] step:3361/10000 train_time:159371ms step_avg:47.42ms
+[2025-09-05 21:14:35] [Rank 0] step:3381/10000 train_time:160109ms step_avg:47.36ms
+[2025-09-05 21:14:36] [Rank 0] step:3401/10000 train_time:160846ms step_avg:47.29ms
+[2025-09-05 21:14:37] [Rank 0] step:3421/10000 train_time:161584ms step_avg:47.23ms
+[2025-09-05 21:14:38] [Rank 0] step:3441/10000 train_time:162323ms step_avg:47.17ms
+[2025-09-05 21:14:38] [Rank 0] step:3461/10000 train_time:163060ms step_avg:47.11ms
+[2025-09-05 21:14:39] [Rank 0] step:3481/10000 train_time:163798ms step_avg:47.05ms
+[2025-09-05 21:14:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:14:40] [Rank 0] PRINT: step:3500/10000 train_loss:2.4378 val_loss:2.3762 train_time:164615ms step_avg:47.03ms
+[2025-09-05 21:14:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:14:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:16:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:16:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:16:01] [Rank 0] Total Loss: 4.6632
+[2025-09-05 21:16:01] [Rank 0] Total FTA (Unweighted): 0.2625
+[2025-09-05 21:16:01] [Rank 0] Total FTA (Weighted): 0.2625
+[2025-09-05 21:16:01] [Rank 0] Group 0 Loss: 3.1863
+[2025-09-05 21:16:01] [Rank 0] Group 1 Loss: 2.9579
+[2025-09-05 21:16:01] [Rank 0] Group 2 Loss: 3.1675
+[2025-09-05 21:16:01] [Rank 0] Group 3 Loss: 3.5803
+[2025-09-05 21:16:01] [Rank 0] Group 4 Loss: 4.0290
+[2025-09-05 21:16:01] [Rank 0] Group 5 Loss: 4.5184
+[2025-09-05 21:16:01] [Rank 0] Group 6 Loss: 4.8445
+[2025-09-05 21:16:01] [Rank 0] Group 7 Loss: 4.9844
+[2025-09-05 21:16:01] [Rank 0] Group 8 Loss: 5.2703
+[2025-09-05 21:16:01] [Rank 0] Group 9 Loss: 5.4184
+[2025-09-05 21:16:01] [Rank 0] Group 10 Loss: 5.4210
+[2025-09-05 21:16:01] [Rank 0] Group 11 Loss: 5.5112
+[2025-09-05 21:16:01] [Rank 0] Group 12 Loss: 5.3971
+[2025-09-05 21:16:01] [Rank 0] Group 13 Loss: 5.4330
+[2025-09-05 21:16:01] [Rank 0] Group 14 Loss: 5.4814
+[2025-09-05 21:16:01] [Rank 0] Group 15 Loss: 5.4105
+[2025-09-05 21:16:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:16:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:16:01] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 21:16:01] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:16:01] [Rank 0] Group 4 FTA: 0.1800
+[2025-09-05 21:16:01] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 21:16:01] [Rank 0] Group 6 FTA: 0.1700
+[2025-09-05 21:16:01] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 21:16:01] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 21:16:01] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 21:16:01] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-05 21:16:01] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 21:16:01] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 21:16:01] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 21:16:01] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 21:16:01] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 21:16:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:16:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:16:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:16:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:16:02] [Rank 0] step:3501/10000 train_time:164624ms step_avg:47.02ms
+[2025-09-05 21:16:03] [Rank 0] step:3521/10000 train_time:165292ms step_avg:46.94ms
+[2025-09-05 21:16:04] [Rank 0] step:3541/10000 train_time:166038ms step_avg:46.89ms
+[2025-09-05 21:16:05] [Rank 0] step:3561/10000 train_time:166776ms step_avg:46.83ms
+[2025-09-05 21:16:05] [Rank 0] step:3581/10000 train_time:167514ms step_avg:46.78ms
+[2025-09-05 21:16:06] [Rank 0] step:3601/10000 train_time:168251ms step_avg:46.72ms
+[2025-09-05 21:16:07] [Rank 0] step:3621/10000 train_time:168989ms step_avg:46.67ms
+[2025-09-05 21:16:08] [Rank 0] step:3641/10000 train_time:170347ms step_avg:46.79ms
+[2025-09-05 21:16:09] [Rank 0] step:3661/10000 train_time:171085ms step_avg:46.73ms
+[2025-09-05 21:16:10] [Rank 0] step:3681/10000 train_time:171823ms step_avg:46.68ms
+[2025-09-05 21:16:10] [Rank 0] step:3701/10000 train_time:172560ms step_avg:46.63ms
+[2025-09-05 21:16:11] [Rank 0] step:3721/10000 train_time:173298ms step_avg:46.57ms
+[2025-09-05 21:16:12] [Rank 0] step:3741/10000 train_time:174035ms step_avg:46.52ms
+[2025-09-05 21:16:13] [Rank 0] step:3761/10000 train_time:174773ms step_avg:46.47ms
+[2025-09-05 21:16:13] [Rank 0] step:3781/10000 train_time:175511ms step_avg:46.42ms
+[2025-09-05 21:16:14] [Rank 0] step:3801/10000 train_time:176249ms step_avg:46.37ms
+[2025-09-05 21:16:15] [Rank 0] step:3821/10000 train_time:176986ms step_avg:46.32ms
+[2025-09-05 21:16:16] [Rank 0] step:3841/10000 train_time:177723ms step_avg:46.27ms
+[2025-09-05 21:16:16] [Rank 0] step:3861/10000 train_time:178461ms step_avg:46.22ms
+[2025-09-05 21:16:17] [Rank 0] step:3881/10000 train_time:179199ms step_avg:46.17ms
+[2025-09-05 21:16:18] [Rank 0] step:3901/10000 train_time:179937ms step_avg:46.13ms
+[2025-09-05 21:16:18] [Rank 0] step:3921/10000 train_time:180674ms step_avg:46.08ms
+[2025-09-05 21:16:19] [Rank 0] step:3941/10000 train_time:181412ms step_avg:46.03ms
+[2025-09-05 21:16:20] [Rank 0] step:3961/10000 train_time:182150ms step_avg:45.99ms
+[2025-09-05 21:16:21] [Rank 0] step:3981/10000 train_time:182888ms step_avg:45.94ms
+[2025-09-05 21:16:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:16:22] [Rank 0] PRINT: step:4000/10000 train_loss:2.3500 val_loss:2.3068 train_time:183706ms step_avg:45.93ms
+[2025-09-05 21:16:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:16:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:17:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:17:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:17:43] [Rank 0] Total Loss: 4.6961
+[2025-09-05 21:17:43] [Rank 0] Total FTA (Unweighted): 0.2800
+[2025-09-05 21:17:43] [Rank 0] Total FTA (Weighted): 0.2800
+[2025-09-05 21:17:43] [Rank 0] Group 0 Loss: 3.2510
+[2025-09-05 21:17:43] [Rank 0] Group 1 Loss: 3.1521
+[2025-09-05 21:17:43] [Rank 0] Group 2 Loss: 3.2460
+[2025-09-05 21:17:43] [Rank 0] Group 3 Loss: 3.6723
+[2025-09-05 21:17:43] [Rank 0] Group 4 Loss: 4.0494
+[2025-09-05 21:17:43] [Rank 0] Group 5 Loss: 4.5325
+[2025-09-05 21:17:43] [Rank 0] Group 6 Loss: 4.8412
+[2025-09-05 21:17:43] [Rank 0] Group 7 Loss: 4.9709
+[2025-09-05 21:17:43] [Rank 0] Group 8 Loss: 5.2738
+[2025-09-05 21:17:43] [Rank 0] Group 9 Loss: 5.4014
+[2025-09-05 21:17:43] [Rank 0] Group 10 Loss: 5.4317
+[2025-09-05 21:17:43] [Rank 0] Group 11 Loss: 5.4924
+[2025-09-05 21:17:43] [Rank 0] Group 12 Loss: 5.4249
+[2025-09-05 21:17:43] [Rank 0] Group 13 Loss: 5.4672
+[2025-09-05 21:17:43] [Rank 0] Group 14 Loss: 5.5163
+[2025-09-05 21:17:43] [Rank 0] Group 15 Loss: 5.4147
+[2025-09-05 21:17:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:17:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:17:43] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 21:17:43] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:17:43] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:17:43] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 21:17:43] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 21:17:43] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 21:17:43] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:17:43] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 21:17:43] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-05 21:17:43] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 21:17:43] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 21:17:43] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 21:17:43] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:17:43] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 21:17:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:17:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:17:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:17:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:17:44] [Rank 0] step:4001/10000 train_time:183715ms step_avg:45.92ms
+[2025-09-05 21:17:46] [Rank 0] step:4021/10000 train_time:184992ms step_avg:46.01ms
+[2025-09-05 21:17:47] [Rank 0] step:4041/10000 train_time:185730ms step_avg:45.96ms
+[2025-09-05 21:17:47] [Rank 0] step:4061/10000 train_time:186467ms step_avg:45.92ms
+[2025-09-05 21:17:48] [Rank 0] step:4081/10000 train_time:187205ms step_avg:45.87ms
+[2025-09-05 21:17:49] [Rank 0] step:4101/10000 train_time:187943ms step_avg:45.83ms
+[2025-09-05 21:17:49] [Rank 0] step:4121/10000 train_time:188680ms step_avg:45.79ms
+[2025-09-05 21:17:50] [Rank 0] step:4141/10000 train_time:189418ms step_avg:45.74ms
+[2025-09-05 21:17:51] [Rank 0] step:4161/10000 train_time:190157ms step_avg:45.70ms
+[2025-09-05 21:17:52] [Rank 0] step:4181/10000 train_time:190893ms step_avg:45.66ms
+[2025-09-05 21:17:52] [Rank 0] step:4201/10000 train_time:191630ms step_avg:45.62ms
+[2025-09-05 21:17:53] [Rank 0] step:4221/10000 train_time:192367ms step_avg:45.57ms
+[2025-09-05 21:17:54] [Rank 0] step:4241/10000 train_time:193105ms step_avg:45.53ms
+[2025-09-05 21:17:55] [Rank 0] step:4261/10000 train_time:193843ms step_avg:45.49ms
+[2025-09-05 21:17:55] [Rank 0] step:4281/10000 train_time:194579ms step_avg:45.45ms
+[2025-09-05 21:17:56] [Rank 0] step:4301/10000 train_time:195317ms step_avg:45.41ms
+[2025-09-05 21:17:57] [Rank 0] step:4321/10000 train_time:196055ms step_avg:45.37ms
+[2025-09-05 21:17:58] [Rank 0] step:4341/10000 train_time:196793ms step_avg:45.33ms
+[2025-09-05 21:17:58] [Rank 0] step:4361/10000 train_time:197531ms step_avg:45.29ms
+[2025-09-05 21:17:59] [Rank 0] step:4381/10000 train_time:198270ms step_avg:45.26ms
+[2025-09-05 21:18:00] [Rank 0] step:4401/10000 train_time:199009ms step_avg:45.22ms
+[2025-09-05 21:18:01] [Rank 0] step:4421/10000 train_time:199746ms step_avg:45.18ms
+[2025-09-05 21:18:01] [Rank 0] step:4441/10000 train_time:200485ms step_avg:45.14ms
+[2025-09-05 21:18:02] [Rank 0] step:4461/10000 train_time:201224ms step_avg:45.11ms
+[2025-09-05 21:18:03] [Rank 0] step:4481/10000 train_time:201963ms step_avg:45.07ms
+[2025-09-05 21:18:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:18:04] [Rank 0] PRINT: step:4500/10000 train_loss:2.2924 val_loss:2.2558 train_time:202782ms step_avg:45.06ms
+[2025-09-05 21:18:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:18:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:19:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:19:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:19:24] [Rank 0] Total Loss: 4.6194
+[2025-09-05 21:19:24] [Rank 0] Total FTA (Unweighted): 0.2869
+[2025-09-05 21:19:24] [Rank 0] Total FTA (Weighted): 0.2869
+[2025-09-05 21:19:24] [Rank 0] Group 0 Loss: 3.2530
+[2025-09-05 21:19:24] [Rank 0] Group 1 Loss: 3.0430
+[2025-09-05 21:19:24] [Rank 0] Group 2 Loss: 3.2282
+[2025-09-05 21:19:24] [Rank 0] Group 3 Loss: 3.6273
+[2025-09-05 21:19:24] [Rank 0] Group 4 Loss: 3.9795
+[2025-09-05 21:19:24] [Rank 0] Group 5 Loss: 4.4345
+[2025-09-05 21:19:24] [Rank 0] Group 6 Loss: 4.7376
+[2025-09-05 21:19:24] [Rank 0] Group 7 Loss: 4.8844
+[2025-09-05 21:19:24] [Rank 0] Group 8 Loss: 5.2011
+[2025-09-05 21:19:24] [Rank 0] Group 9 Loss: 5.3309
+[2025-09-05 21:19:24] [Rank 0] Group 10 Loss: 5.3293
+[2025-09-05 21:19:24] [Rank 0] Group 11 Loss: 5.4053
+[2025-09-05 21:19:24] [Rank 0] Group 12 Loss: 5.3436
+[2025-09-05 21:19:24] [Rank 0] Group 13 Loss: 5.3531
+[2025-09-05 21:19:24] [Rank 0] Group 14 Loss: 5.4202
+[2025-09-05 21:19:24] [Rank 0] Group 15 Loss: 5.3399
+[2025-09-05 21:19:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:19:24] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:19:24] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-05 21:19:24] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:19:24] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:19:24] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 21:19:24] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 21:19:24] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 21:19:24] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:19:24] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 21:19:24] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-05 21:19:24] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 21:19:24] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 21:19:24] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 21:19:24] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 21:19:24] [Rank 0] Group 15 FTA: 0.0400
+[2025-09-05 21:19:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:19:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:19:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:19:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:19:26] [Rank 0] step:4501/10000 train_time:202791ms step_avg:45.05ms
+[2025-09-05 21:19:27] [Rank 0] step:4521/10000 train_time:203455ms step_avg:45.00ms
+[2025-09-05 21:19:27] [Rank 0] step:4541/10000 train_time:204193ms step_avg:44.97ms
+[2025-09-05 21:19:28] [Rank 0] step:4561/10000 train_time:204931ms step_avg:44.93ms
+[2025-09-05 21:19:29] [Rank 0] step:4581/10000 train_time:205668ms step_avg:44.90ms
+[2025-09-05 21:19:29] [Rank 0] step:4601/10000 train_time:206405ms step_avg:44.86ms
+[2025-09-05 21:19:30] [Rank 0] step:4621/10000 train_time:207143ms step_avg:44.83ms
+[2025-09-05 21:19:31] [Rank 0] step:4641/10000 train_time:207880ms step_avg:44.79ms
+[2025-09-05 21:19:32] [Rank 0] step:4661/10000 train_time:208617ms step_avg:44.76ms
+[2025-09-05 21:19:32] [Rank 0] step:4681/10000 train_time:209356ms step_avg:44.72ms
+[2025-09-05 21:19:33] [Rank 0] step:4701/10000 train_time:210093ms step_avg:44.69ms
+[2025-09-05 21:19:34] [Rank 0] step:4721/10000 train_time:210831ms step_avg:44.66ms
+[2025-09-05 21:19:35] [Rank 0] step:4741/10000 train_time:211569ms step_avg:44.63ms
+[2025-09-05 21:19:35] [Rank 0] step:4761/10000 train_time:212307ms step_avg:44.59ms
+[2025-09-05 21:19:36] [Rank 0] step:4781/10000 train_time:213045ms step_avg:44.56ms
+[2025-09-05 21:19:37] [Rank 0] step:4801/10000 train_time:213782ms step_avg:44.53ms
+[2025-09-05 21:19:38] [Rank 0] step:4821/10000 train_time:214519ms step_avg:44.50ms
+[2025-09-05 21:19:39] [Rank 0] step:4841/10000 train_time:215564ms step_avg:44.53ms
+[2025-09-05 21:19:39] [Rank 0] step:4861/10000 train_time:216302ms step_avg:44.50ms
+[2025-09-05 21:19:40] [Rank 0] step:4881/10000 train_time:217040ms step_avg:44.47ms
+[2025-09-05 21:19:41] [Rank 0] step:4901/10000 train_time:217778ms step_avg:44.44ms
+[2025-09-05 21:19:42] [Rank 0] step:4921/10000 train_time:218516ms step_avg:44.40ms
+[2025-09-05 21:19:42] [Rank 0] step:4941/10000 train_time:219254ms step_avg:44.37ms
+[2025-09-05 21:19:43] [Rank 0] step:4961/10000 train_time:220137ms step_avg:44.37ms
+[2025-09-05 21:19:44] [Rank 0] step:4981/10000 train_time:220875ms step_avg:44.34ms
+[2025-09-05 21:19:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:19:45] [Rank 0] PRINT: step:5000/10000 train_loss:2.2423 val_loss:2.2149 train_time:221693ms step_avg:44.34ms
+[2025-09-05 21:19:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:19:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:21:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:21:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:21:06] [Rank 0] Total Loss: 4.5725
+[2025-09-05 21:21:06] [Rank 0] Total FTA (Unweighted): 0.2869
+[2025-09-05 21:21:06] [Rank 0] Total FTA (Weighted): 0.2869
+[2025-09-05 21:21:06] [Rank 0] Group 0 Loss: 3.2301
+[2025-09-05 21:21:06] [Rank 0] Group 1 Loss: 3.0631
+[2025-09-05 21:21:06] [Rank 0] Group 2 Loss: 3.1612
+[2025-09-05 21:21:06] [Rank 0] Group 3 Loss: 3.5611
+[2025-09-05 21:21:06] [Rank 0] Group 4 Loss: 3.9229
+[2025-09-05 21:21:06] [Rank 0] Group 5 Loss: 4.3496
+[2025-09-05 21:21:06] [Rank 0] Group 6 Loss: 4.6730
+[2025-09-05 21:21:06] [Rank 0] Group 7 Loss: 4.8360
+[2025-09-05 21:21:06] [Rank 0] Group 8 Loss: 5.1492
+[2025-09-05 21:21:06] [Rank 0] Group 9 Loss: 5.2799
+[2025-09-05 21:21:06] [Rank 0] Group 10 Loss: 5.2912
+[2025-09-05 21:21:06] [Rank 0] Group 11 Loss: 5.3591
+[2025-09-05 21:21:06] [Rank 0] Group 12 Loss: 5.2905
+[2025-09-05 21:21:06] [Rank 0] Group 13 Loss: 5.3263
+[2025-09-05 21:21:06] [Rank 0] Group 14 Loss: 5.3695
+[2025-09-05 21:21:06] [Rank 0] Group 15 Loss: 5.2979
+[2025-09-05 21:21:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:21:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:21:06] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-05 21:21:06] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:21:06] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:21:06] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 21:21:06] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 21:21:06] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 21:21:06] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:21:06] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 21:21:06] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 21:21:06] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 21:21:06] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 21:21:06] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 21:21:06] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 21:21:06] [Rank 0] Group 15 FTA: 0.0400
+[2025-09-05 21:21:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:21:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:21:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:21:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:21:07] [Rank 0] step:5001/10000 train_time:221702ms step_avg:44.33ms
+[2025-09-05 21:21:08] [Rank 0] step:5021/10000 train_time:222376ms step_avg:44.29ms
+[2025-09-05 21:21:09] [Rank 0] step:5041/10000 train_time:223115ms step_avg:44.26ms
+[2025-09-05 21:21:10] [Rank 0] step:5061/10000 train_time:223853ms step_avg:44.23ms
+[2025-09-05 21:21:10] [Rank 0] step:5081/10000 train_time:224591ms step_avg:44.20ms
+[2025-09-05 21:21:11] [Rank 0] step:5101/10000 train_time:225328ms step_avg:44.17ms
+[2025-09-05 21:21:12] [Rank 0] step:5121/10000 train_time:226066ms step_avg:44.14ms
+[2025-09-05 21:21:13] [Rank 0] step:5141/10000 train_time:226804ms step_avg:44.12ms
+[2025-09-05 21:21:13] [Rank 0] step:5161/10000 train_time:227542ms step_avg:44.09ms
+[2025-09-05 21:21:14] [Rank 0] step:5181/10000 train_time:228280ms step_avg:44.06ms
+[2025-09-05 21:21:15] [Rank 0] step:5201/10000 train_time:229018ms step_avg:44.03ms
+[2025-09-05 21:21:16] [Rank 0] step:5221/10000 train_time:229756ms step_avg:44.01ms
+[2025-09-05 21:21:16] [Rank 0] step:5241/10000 train_time:230494ms step_avg:43.98ms
+[2025-09-05 21:21:17] [Rank 0] step:5261/10000 train_time:231232ms step_avg:43.95ms
+[2025-09-05 21:21:18] [Rank 0] step:5281/10000 train_time:231970ms step_avg:43.93ms
+[2025-09-05 21:21:18] [Rank 0] step:5301/10000 train_time:232707ms step_avg:43.90ms
+[2025-09-05 21:21:19] [Rank 0] step:5321/10000 train_time:233453ms step_avg:43.87ms
+[2025-09-05 21:21:20] [Rank 0] step:5341/10000 train_time:234191ms step_avg:43.85ms
+[2025-09-05 21:21:21] [Rank 0] step:5361/10000 train_time:234929ms step_avg:43.82ms
+[2025-09-05 21:21:21] [Rank 0] step:5381/10000 train_time:235667ms step_avg:43.80ms
+[2025-09-05 21:21:22] [Rank 0] step:5401/10000 train_time:236405ms step_avg:43.77ms
+[2025-09-05 21:21:23] [Rank 0] step:5421/10000 train_time:237143ms step_avg:43.75ms
+[2025-09-05 21:21:24] [Rank 0] step:5441/10000 train_time:237881ms step_avg:43.72ms
+[2025-09-05 21:21:24] [Rank 0] step:5461/10000 train_time:238620ms step_avg:43.70ms
+[2025-09-05 21:21:25] [Rank 0] step:5481/10000 train_time:239359ms step_avg:43.67ms
+[2025-09-05 21:21:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:21:26] [Rank 0] PRINT: step:5500/10000 train_loss:2.2050 val_loss:2.1791 train_time:240178ms step_avg:43.67ms
+[2025-09-05 21:21:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:21:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:22:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:22:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:22:47] [Rank 0] Total Loss: 4.5662
+[2025-09-05 21:22:47] [Rank 0] Total FTA (Unweighted): 0.2919
+[2025-09-05 21:22:47] [Rank 0] Total FTA (Weighted): 0.2919
+[2025-09-05 21:22:47] [Rank 0] Group 0 Loss: 3.2391
+[2025-09-05 21:22:47] [Rank 0] Group 1 Loss: 3.0702
+[2025-09-05 21:22:47] [Rank 0] Group 2 Loss: 3.2039
+[2025-09-05 21:22:47] [Rank 0] Group 3 Loss: 3.5691
+[2025-09-05 21:22:47] [Rank 0] Group 4 Loss: 3.9297
+[2025-09-05 21:22:47] [Rank 0] Group 5 Loss: 4.3614
+[2025-09-05 21:22:47] [Rank 0] Group 6 Loss: 4.6683
+[2025-09-05 21:22:47] [Rank 0] Group 7 Loss: 4.8140
+[2025-09-05 21:22:47] [Rank 0] Group 8 Loss: 5.1339
+[2025-09-05 21:22:47] [Rank 0] Group 9 Loss: 5.2586
+[2025-09-05 21:22:47] [Rank 0] Group 10 Loss: 5.2601
+[2025-09-05 21:22:47] [Rank 0] Group 11 Loss: 5.3182
+[2025-09-05 21:22:47] [Rank 0] Group 12 Loss: 5.2771
+[2025-09-05 21:22:47] [Rank 0] Group 13 Loss: 5.3159
+[2025-09-05 21:22:47] [Rank 0] Group 14 Loss: 5.3470
+[2025-09-05 21:22:47] [Rank 0] Group 15 Loss: 5.2929
+[2025-09-05 21:22:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:22:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:22:47] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-05 21:22:47] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:22:47] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:22:47] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 21:22:47] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:22:47] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 21:22:47] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:22:47] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 21:22:47] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 21:22:47] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 21:22:47] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 21:22:47] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 21:22:47] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 21:22:47] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 21:22:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:22:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:22:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:22:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:22:48] [Rank 0] step:5501/10000 train_time:240186ms step_avg:43.66ms
+[2025-09-05 21:22:49] [Rank 0] step:5521/10000 train_time:240869ms step_avg:43.63ms
+[2025-09-05 21:22:50] [Rank 0] step:5541/10000 train_time:241605ms step_avg:43.60ms
+[2025-09-05 21:22:50] [Rank 0] step:5561/10000 train_time:242343ms step_avg:43.58ms
+[2025-09-05 21:22:51] [Rank 0] step:5581/10000 train_time:243082ms step_avg:43.56ms
+[2025-09-05 21:22:52] [Rank 0] step:5601/10000 train_time:243820ms step_avg:43.53ms
+[2025-09-05 21:22:53] [Rank 0] step:5621/10000 train_time:244701ms step_avg:43.53ms
+[2025-09-05 21:22:54] [Rank 0] step:5641/10000 train_time:246062ms step_avg:43.62ms
+[2025-09-05 21:22:55] [Rank 0] step:5661/10000 train_time:247018ms step_avg:43.64ms
+[2025-09-05 21:22:56] [Rank 0] step:5681/10000 train_time:247758ms step_avg:43.61ms
+[2025-09-05 21:22:57] [Rank 0] step:5701/10000 train_time:248496ms step_avg:43.59ms
+[2025-09-05 21:22:57] [Rank 0] step:5721/10000 train_time:249235ms step_avg:43.56ms
+[2025-09-05 21:22:58] [Rank 0] step:5741/10000 train_time:249974ms step_avg:43.54ms
+[2025-09-05 21:22:59] [Rank 0] step:5761/10000 train_time:250712ms step_avg:43.52ms
+[2025-09-05 21:23:00] [Rank 0] step:5781/10000 train_time:251450ms step_avg:43.50ms
+[2025-09-05 21:23:00] [Rank 0] step:5801/10000 train_time:252188ms step_avg:43.47ms
+[2025-09-05 21:23:01] [Rank 0] step:5821/10000 train_time:252926ms step_avg:43.45ms
+[2025-09-05 21:23:02] [Rank 0] step:5841/10000 train_time:253663ms step_avg:43.43ms
+[2025-09-05 21:23:03] [Rank 0] step:5861/10000 train_time:254401ms step_avg:43.41ms
+[2025-09-05 21:23:03] [Rank 0] step:5881/10000 train_time:255139ms step_avg:43.38ms
+[2025-09-05 21:23:04] [Rank 0] step:5901/10000 train_time:255878ms step_avg:43.36ms
+[2025-09-05 21:23:05] [Rank 0] step:5921/10000 train_time:256615ms step_avg:43.34ms
+[2025-09-05 21:23:05] [Rank 0] step:5941/10000 train_time:257354ms step_avg:43.32ms
+[2025-09-05 21:23:06] [Rank 0] step:5961/10000 train_time:258092ms step_avg:43.30ms
+[2025-09-05 21:23:07] [Rank 0] step:5981/10000 train_time:258830ms step_avg:43.28ms
+[2025-09-05 21:23:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:23:08] [Rank 0] PRINT: step:6000/10000 train_loss:2.1735 val_loss:2.1503 train_time:259649ms step_avg:43.27ms
+[2025-09-05 21:23:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:23:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:24:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:24:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:24:28] [Rank 0] Total Loss: 4.5495
+[2025-09-05 21:24:28] [Rank 0] Total FTA (Unweighted): 0.2950
+[2025-09-05 21:24:28] [Rank 0] Total FTA (Weighted): 0.2950
+[2025-09-05 21:24:28] [Rank 0] Group 0 Loss: 3.2500
+[2025-09-05 21:24:28] [Rank 0] Group 1 Loss: 3.0391
+[2025-09-05 21:24:29] [Rank 0] Group 2 Loss: 3.2196
+[2025-09-05 21:24:29] [Rank 0] Group 3 Loss: 3.5743
+[2025-09-05 21:24:29] [Rank 0] Group 4 Loss: 3.9146
+[2025-09-05 21:24:29] [Rank 0] Group 5 Loss: 4.3518
+[2025-09-05 21:24:29] [Rank 0] Group 6 Loss: 4.6293
+[2025-09-05 21:24:29] [Rank 0] Group 7 Loss: 4.7938
+[2025-09-05 21:24:29] [Rank 0] Group 8 Loss: 5.1102
+[2025-09-05 21:24:29] [Rank 0] Group 9 Loss: 5.2213
+[2025-09-05 21:24:29] [Rank 0] Group 10 Loss: 5.2407
+[2025-09-05 21:24:29] [Rank 0] Group 11 Loss: 5.2992
+[2025-09-05 21:24:29] [Rank 0] Group 12 Loss: 5.2632
+[2025-09-05 21:24:29] [Rank 0] Group 13 Loss: 5.2945
+[2025-09-05 21:24:29] [Rank 0] Group 14 Loss: 5.3217
+[2025-09-05 21:24:29] [Rank 0] Group 15 Loss: 5.2688
+[2025-09-05 21:24:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:24:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:24:29] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-05 21:24:29] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:24:29] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:24:29] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 21:24:29] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:24:29] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 21:24:29] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:24:29] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 21:24:29] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 21:24:29] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 21:24:29] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 21:24:29] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 21:24:29] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 21:24:29] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 21:24:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:24:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:24:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:24:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:24:30] [Rank 0] step:6001/10000 train_time:259658ms step_avg:43.27ms
+[2025-09-05 21:24:32] [Rank 0] step:6021/10000 train_time:260943ms step_avg:43.34ms
+[2025-09-05 21:24:32] [Rank 0] step:6041/10000 train_time:261681ms step_avg:43.32ms
+[2025-09-05 21:24:33] [Rank 0] step:6061/10000 train_time:262419ms step_avg:43.30ms
+[2025-09-05 21:24:34] [Rank 0] step:6081/10000 train_time:263157ms step_avg:43.28ms
+[2025-09-05 21:24:34] [Rank 0] step:6101/10000 train_time:263895ms step_avg:43.25ms
+[2025-09-05 21:24:35] [Rank 0] step:6121/10000 train_time:264633ms step_avg:43.23ms
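[Editor's note: in every detailed evaluation in this log the weighted and unweighted total FTA agree (0.2950 at step 6000 above), which is what one would expect if the 1600-sample fixed-eval set is split evenly across the 16 groups, 100 samples each: a sample-weighted mean over equal-sized groups reduces to the plain mean of the per-group scores. A small sketch of that identity; the helper is hypothetical, not the script's code.]

def total_fta(per_group_fta, group_sizes):
    # Unweighted: plain mean of group scores; weighted: sample-weighted mean.
    unweighted = sum(per_group_fta) / len(per_group_fta)
    weighted = sum(f * n for f, n in zip(per_group_fta, group_sizes)) / sum(group_sizes)
    return unweighted, weighted

fta = [1.00, 1.00, 0.39, 0.17, 0.25, 0.24, 0.29, 0.13,
       0.22, 0.14, 0.19, 0.18, 0.15, 0.18, 0.13, 0.06]
print(total_fta(fta, [100] * 16))  # both ~0.295, matching the step-6000 log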
+[2025-09-05 21:24:36] [Rank 0] step:6141/10000 train_time:265371ms step_avg:43.21ms
+[2025-09-05 21:24:37] [Rank 0] step:6161/10000 train_time:266109ms step_avg:43.19ms
+[2025-09-05 21:24:37] [Rank 0] step:6181/10000 train_time:266847ms step_avg:43.17ms
+[2025-09-05 21:24:38] [Rank 0] step:6201/10000 train_time:267584ms step_avg:43.15ms
+[2025-09-05 21:24:39] [Rank 0] step:6221/10000 train_time:268322ms step_avg:43.13ms
+[2025-09-05 21:24:40] [Rank 0] step:6241/10000 train_time:269059ms step_avg:43.11ms
+[2025-09-05 21:24:40] [Rank 0] step:6261/10000 train_time:269797ms step_avg:43.09ms
+[2025-09-05 21:24:41] [Rank 0] step:6281/10000 train_time:270535ms step_avg:43.07ms
+[2025-09-05 21:24:42] [Rank 0] step:6301/10000 train_time:271273ms step_avg:43.05ms
+[2025-09-05 21:24:43] [Rank 0] step:6321/10000 train_time:272010ms step_avg:43.03ms
+[2025-09-05 21:24:43] [Rank 0] step:6341/10000 train_time:272747ms step_avg:43.01ms
+[2025-09-05 21:24:44] [Rank 0] step:6361/10000 train_time:273484ms step_avg:42.99ms
+[2025-09-05 21:24:45] [Rank 0] step:6381/10000 train_time:274222ms step_avg:42.97ms
+[2025-09-05 21:24:46] [Rank 0] step:6401/10000 train_time:274960ms step_avg:42.96ms
+[2025-09-05 21:24:46] [Rank 0] step:6421/10000 train_time:275698ms step_avg:42.94ms
+[2025-09-05 21:24:47] [Rank 0] step:6441/10000 train_time:276437ms step_avg:42.92ms
+[2025-09-05 21:24:48] [Rank 0] step:6461/10000 train_time:277175ms step_avg:42.90ms
+[2025-09-05 21:24:49] [Rank 0] step:6481/10000 train_time:277913ms step_avg:42.88ms
+[2025-09-05 21:24:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:24:50] [Rank 0] PRINT: step:6500/10000 train_loss:2.1473 val_loss:2.1262 train_time:278731ms step_avg:42.88ms
+[2025-09-05 21:24:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:24:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:26:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:26:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:26:11] [Rank 0] Total Loss: 4.5291
+[2025-09-05 21:26:11] [Rank 0] Total FTA (Unweighted): 0.3063
+[2025-09-05 21:26:11] [Rank 0] Total FTA (Weighted): 0.3063
+[2025-09-05 21:26:11] [Rank 0] Group 0 Loss: 3.2554
+[2025-09-05 21:26:11] [Rank 0] Group 1 Loss: 3.0391
+[2025-09-05 21:26:11] [Rank 0] Group 2 Loss: 3.2225
+[2025-09-05 21:26:11] [Rank 0] Group 3 Loss: 3.5763
+[2025-09-05 21:26:11] [Rank 0] Group 4 Loss: 3.9043
+[2025-09-05 21:26:11] [Rank 0] Group 5 Loss: 4.3246
+[2025-09-05 21:26:11] [Rank 0] Group 6 Loss: 4.5920
+[2025-09-05 21:26:11] [Rank 0] Group 7 Loss: 4.7575
+[2025-09-05 21:26:11] [Rank 0] Group 8 Loss: 5.0746
+[2025-09-05 21:26:11] [Rank 0] Group 9 Loss: 5.1931
+[2025-09-05 21:26:11] [Rank 0] Group 10 Loss: 5.2346
+[2025-09-05 21:26:11] [Rank 0] Group 11 Loss: 5.2917
+[2025-09-05 21:26:11] [Rank 0] Group 12 Loss: 5.2177
+[2025-09-05 21:26:11] [Rank 0] Group 13 Loss: 5.2542
+[2025-09-05 21:26:11] [Rank 0] Group 14 Loss: 5.3029
+[2025-09-05 21:26:11] [Rank 0] Group 15 Loss: 5.2256
+[2025-09-05 21:26:11] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:26:11] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:26:11] [Rank 0] Group 2 FTA: 0.4700
+[2025-09-05 21:26:11] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:26:12] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:26:12] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 21:26:12] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:26:12] [Rank 0] Group 7 FTA: 0.1400
+[2025-09-05 21:26:12] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:26:12] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 21:26:12] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 21:26:12] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 21:26:12] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 21:26:12] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 21:26:12] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:26:12] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 21:26:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:26:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:26:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:26:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:26:13] [Rank 0] step:6501/10000 train_time:278740ms step_avg:42.88ms
+[2025-09-05 21:26:14] [Rank 0] step:6521/10000 train_time:279410ms step_avg:42.85ms
+[2025-09-05 21:26:14] [Rank 0] step:6541/10000 train_time:280147ms step_avg:42.83ms
+[2025-09-05 21:26:15] [Rank 0] step:6561/10000 train_time:280886ms step_avg:42.81ms
+[2025-09-05 21:26:16] [Rank 0] step:6581/10000 train_time:281624ms step_avg:42.79ms
+[2025-09-05 21:26:17] [Rank 0] step:6601/10000 train_time:282362ms step_avg:42.78ms
+[2025-09-05 21:26:17] [Rank 0] step:6621/10000 train_time:283100ms step_avg:42.76ms
+[2025-09-05 21:26:18] [Rank 0] step:6641/10000 train_time:283838ms step_avg:42.74ms
+[2025-09-05 21:26:19] [Rank 0] step:6661/10000 train_time:284576ms step_avg:42.72ms
+[2025-09-05 21:26:20] [Rank 0] step:6681/10000 train_time:285314ms step_avg:42.71ms
+[2025-09-05 21:26:20] [Rank 0] step:6701/10000 train_time:286051ms step_avg:42.69ms
+[2025-09-05 21:26:21] [Rank 0] step:6721/10000 train_time:286790ms step_avg:42.67ms
+[2025-09-05 21:26:22] [Rank 0] step:6741/10000 train_time:287527ms step_avg:42.65ms
+[2025-09-05 21:26:23] [Rank 0] step:6761/10000 train_time:288265ms step_avg:42.64ms
+[2025-09-05 21:26:23] [Rank 0] step:6781/10000 train_time:289003ms step_avg:42.62ms
+[2025-09-05 21:26:24] [Rank 0] step:6801/10000 train_time:289741ms step_avg:42.60ms
+[2025-09-05 21:26:25] [Rank 0] step:6821/10000 train_time:290479ms step_avg:42.59ms
+[2025-09-05 21:26:26] [Rank 0] step:6841/10000 train_time:291835ms step_avg:42.66ms
+[2025-09-05 21:26:27] [Rank 0] step:6861/10000 train_time:292574ms step_avg:42.64ms
+[2025-09-05 21:26:28] [Rank 0] step:6881/10000 train_time:293311ms step_avg:42.63ms
+[2025-09-05 21:26:28] [Rank 0] step:6901/10000 train_time:294049ms step_avg:42.61ms
+[2025-09-05 21:26:29] [Rank 0] step:6921/10000 train_time:294787ms step_avg:42.59ms
+[2025-09-05 21:26:30] [Rank 0] step:6941/10000 train_time:295525ms step_avg:42.58ms
+[2025-09-05 21:26:31] [Rank 0] step:6961/10000 train_time:296262ms step_avg:42.56ms
+[2025-09-05 21:26:31] [Rank 0] step:6981/10000 train_time:297000ms step_avg:42.54ms
+[2025-09-05 21:26:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:26:32] [Rank 0] PRINT: step:7000/10000 train_loss:2.1236 val_loss:2.1049 train_time:297817ms step_avg:42.55ms
+[2025-09-05 21:26:33] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:26:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:27:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:27:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:27:54] [Rank 0] Total Loss: 4.5235
+[2025-09-05 21:27:54] [Rank 0] Total FTA (Unweighted): 0.3206
+[2025-09-05 21:27:54] [Rank 0] Total FTA (Weighted): 0.3206
+[2025-09-05 21:27:54] [Rank 0] Group 0 Loss: 3.2814
+[2025-09-05 21:27:54] [Rank 0] Group 1 Loss: 3.0467
+[2025-09-05 21:27:54] [Rank 0] Group 2 Loss: 3.2185
+[2025-09-05 21:27:54] [Rank 0] Group 3 Loss: 3.5667
+[2025-09-05 21:27:54] [Rank 0] Group 4 Loss: 3.9002
+[2025-09-05 21:27:54] [Rank 0] Group 5 Loss: 4.3103
+[2025-09-05 21:27:54] [Rank 0] Group 6 Loss: 4.5891
+[2025-09-05 21:27:54] [Rank 0] Group 7 Loss: 4.7426
+[2025-09-05 21:27:54] [Rank 0] Group 8 Loss: 5.0639
+[2025-09-05 21:27:54] [Rank 0] Group 9 Loss: 5.1841
+[2025-09-05 21:27:54] [Rank 0] Group 10 Loss: 5.2123
+[2025-09-05 21:27:54] [Rank 0] Group 11 Loss: 5.2608
+[2025-09-05 21:27:54] [Rank 0] Group 12 Loss: 5.2258
+[2025-09-05 21:27:54] [Rank 0] Group 13 Loss: 5.2533
+[2025-09-05 21:27:54] [Rank 0] Group 14 Loss: 5.3005
+[2025-09-05 21:27:54] [Rank 0] Group 15 Loss: 5.2198
+[2025-09-05 21:27:54] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:27:54] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:27:54] [Rank 0] Group 2 FTA: 0.5800
+[2025-09-05 21:27:54] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:27:54] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:27:54] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 21:27:54] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:27:54] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 21:27:54] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:27:54] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 21:27:54] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 21:27:54] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 21:27:54] [Rank 0] Group 12 FTA: 0.2100
+[2025-09-05 21:27:54] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 21:27:54] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 21:27:54] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 21:27:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:27:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:27:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:27:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:27:55] [Rank 0] step:7001/10000 train_time:297826ms step_avg:42.54ms
+[2025-09-05 21:27:56] [Rank 0] step:7021/10000 train_time:298491ms step_avg:42.51ms
+[2025-09-05 21:27:57] [Rank 0] step:7041/10000 train_time:299229ms step_avg:42.50ms
+[2025-09-05 21:27:57] [Rank 0] step:7061/10000 train_time:299967ms step_avg:42.48ms
+[2025-09-05 21:27:58] [Rank 0] step:7081/10000 train_time:300705ms step_avg:42.47ms
+[2025-09-05 21:27:59] [Rank 0] step:7101/10000 train_time:301443ms step_avg:42.45ms
+[2025-09-05 21:28:00] [Rank 0] step:7121/10000 train_time:302181ms step_avg:42.44ms
+[2025-09-05 21:28:00] [Rank 0] step:7141/10000 train_time:302919ms step_avg:42.42ms
+[2025-09-05 21:28:01] [Rank 0] step:7161/10000 train_time:303657ms step_avg:42.40ms
+[2025-09-05 21:28:02] [Rank 0] step:7181/10000 train_time:304396ms step_avg:42.39ms
+[2025-09-05 21:28:03] [Rank 0] step:7201/10000 train_time:305135ms step_avg:42.37ms
+[2025-09-05 21:28:03] [Rank 0] step:7221/10000 train_time:305873ms step_avg:42.36ms
+[2025-09-05 21:28:04] [Rank 0] step:7241/10000 train_time:306611ms step_avg:42.34ms
+[2025-09-05 21:28:05] [Rank 0] step:7261/10000 train_time:307347ms step_avg:42.33ms
+[2025-09-05 21:28:05] [Rank 0] step:7281/10000 train_time:308085ms step_avg:42.31ms
+[2025-09-05 21:28:06] [Rank 0] step:7301/10000 train_time:308822ms step_avg:42.30ms
+[2025-09-05 21:28:07] [Rank 0] step:7321/10000 train_time:309559ms step_avg:42.28ms
+[2025-09-05 21:28:08] [Rank 0] step:7341/10000 train_time:310297ms step_avg:42.27ms
+[2025-09-05 21:28:09] [Rank 0] step:7361/10000 train_time:311243ms step_avg:42.28ms
+[2025-09-05 21:28:09] [Rank 0] step:7381/10000 train_time:311980ms step_avg:42.27ms
+[2025-09-05 21:28:10] [Rank 0] step:7401/10000 train_time:312717ms step_avg:42.25ms
+[2025-09-05 21:28:11] [Rank 0] step:7421/10000 train_time:313577ms step_avg:42.26ms
+[2025-09-05 21:28:12] [Rank 0] step:7441/10000 train_time:314315ms step_avg:42.24ms
+[2025-09-05 21:28:12] [Rank 0] step:7461/10000 train_time:315052ms step_avg:42.23ms
+[2025-09-05 21:28:13] [Rank 0] step:7481/10000 train_time:315789ms step_avg:42.21ms
+[2025-09-05 21:28:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
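[Editor's note: the step_avg field in these lines is cumulative wall-clock training time divided by the step index, which is why it drifts down smoothly as early overhead is amortized; at step 7500 below, 316608 ms / 7500 ≈ 42.21 ms. A one-line reconstruction, illustrative only.]

def step_avg_ms(train_time_ms: int, step: int) -> float:
    # Cumulative training time divided by the current step index.
    return train_time_ms / step

print(f"{step_avg_ms(316608, 7500):.2f}ms")  # 42.21ms, as logged at step 7500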
+[2025-09-05 21:28:14] [Rank 0] PRINT: step:7500/10000 train_loss:2.1047 val_loss:2.0886 train_time:316608ms step_avg:42.21ms
+[2025-09-05 21:28:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:28:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:29:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:29:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:29:35] [Rank 0] Total Loss: 4.4975
+[2025-09-05 21:29:35] [Rank 0] Total FTA (Unweighted): 0.3294
+[2025-09-05 21:29:35] [Rank 0] Total FTA (Weighted): 0.3294
+[2025-09-05 21:29:35] [Rank 0] Group 0 Loss: 3.2679
+[2025-09-05 21:29:35] [Rank 0] Group 1 Loss: 3.0500
+[2025-09-05 21:29:35] [Rank 0] Group 2 Loss: 3.2060
+[2025-09-05 21:29:35] [Rank 0] Group 3 Loss: 3.5460
+[2025-09-05 21:29:35] [Rank 0] Group 4 Loss: 3.8739
+[2025-09-05 21:29:36] [Rank 0] Group 5 Loss: 4.2825
+[2025-09-05 21:29:36] [Rank 0] Group 6 Loss: 4.5537
+[2025-09-05 21:29:36] [Rank 0] Group 7 Loss: 4.7165
+[2025-09-05 21:29:36] [Rank 0] Group 8 Loss: 5.0286
+[2025-09-05 21:29:36] [Rank 0] Group 9 Loss: 5.1510
+[2025-09-05 21:29:36] [Rank 0] Group 10 Loss: 5.1978
+[2025-09-05 21:29:36] [Rank 0] Group 11 Loss: 5.2242
+[2025-09-05 21:29:36] [Rank 0] Group 12 Loss: 5.1945
+[2025-09-05 21:29:36] [Rank 0] Group 13 Loss: 5.2170
+[2025-09-05 21:29:36] [Rank 0] Group 14 Loss: 5.2626
+[2025-09-05 21:29:36] [Rank 0] Group 15 Loss: 5.1877
+[2025-09-05 21:29:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:29:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:29:36] [Rank 0] Group 2 FTA: 0.6400
+[2025-09-05 21:29:36] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:29:36] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:29:36] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 21:29:36] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:29:36] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 21:29:36] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 21:29:36] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 21:29:36] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 21:29:36] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 21:29:36] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 21:29:36] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 21:29:36] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 21:29:36] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 21:29:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:29:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:29:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:29:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:29:37] [Rank 0] step:7501/10000 train_time:316617ms step_avg:42.21ms
+[2025-09-05 21:29:38] [Rank 0] step:7521/10000 train_time:317289ms step_avg:42.19ms
+[2025-09-05 21:29:39] [Rank 0] step:7541/10000 train_time:318028ms step_avg:42.17ms
+[2025-09-05 21:29:39] [Rank 0] step:7561/10000 train_time:318766ms step_avg:42.16ms
+[2025-09-05 21:29:40] [Rank 0] step:7581/10000 train_time:319505ms step_avg:42.15ms
+[2025-09-05 21:29:41] [Rank 0] step:7601/10000 train_time:320241ms step_avg:42.13ms
+[2025-09-05 21:29:42] [Rank 0] step:7621/10000 train_time:320982ms step_avg:42.12ms
+[2025-09-05 21:29:43] [Rank 0] step:7641/10000 train_time:321720ms step_avg:42.10ms
+[2025-09-05 21:29:44] [Rank 0] step:7661/10000 train_time:323081ms step_avg:42.17ms
+[2025-09-05 21:29:44] [Rank 0] step:7681/10000 train_time:323818ms step_avg:42.16ms
+[2025-09-05 21:29:45] [Rank 0] step:7701/10000 train_time:324555ms step_avg:42.14ms
+[2025-09-05 21:29:46] [Rank 0] step:7721/10000 train_time:325293ms step_avg:42.13ms
+[2025-09-05 21:29:47] [Rank 0] step:7741/10000 train_time:326031ms step_avg:42.12ms
+[2025-09-05 21:29:47] [Rank 0] step:7761/10000 train_time:326769ms step_avg:42.10ms
+[2025-09-05 21:29:48] [Rank 0] step:7781/10000 train_time:327508ms step_avg:42.09ms
+[2025-09-05 21:29:49] [Rank 0] step:7801/10000 train_time:328246ms step_avg:42.08ms
+[2025-09-05 21:29:50] [Rank 0] step:7821/10000 train_time:328984ms step_avg:42.06ms
+[2025-09-05 21:29:50] [Rank 0] step:7841/10000 train_time:329722ms step_avg:42.05ms
+[2025-09-05 21:29:51] [Rank 0] step:7861/10000 train_time:330461ms step_avg:42.04ms
+[2025-09-05 21:29:52] [Rank 0] step:7881/10000 train_time:331199ms step_avg:42.03ms
+[2025-09-05 21:29:52] [Rank 0] step:7901/10000 train_time:331938ms step_avg:42.01ms
+[2025-09-05 21:29:53] [Rank 0] step:7921/10000 train_time:332676ms step_avg:42.00ms
+[2025-09-05 21:29:54] [Rank 0] step:7941/10000 train_time:333413ms step_avg:41.99ms
+[2025-09-05 21:29:55] [Rank 0] step:7961/10000 train_time:334149ms step_avg:41.97ms
+[2025-09-05 21:29:55] [Rank 0] step:7981/10000 train_time:334886ms step_avg:41.96ms
+[2025-09-05 21:29:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:29:57] [Rank 0] PRINT: step:8000/10000 train_loss:2.0904 val_loss:2.0741 train_time:335704ms step_avg:41.96ms
+[2025-09-05 21:29:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:29:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:31:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:31:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:31:18] [Rank 0] Total Loss: 4.4738
+[2025-09-05 21:31:18] [Rank 0] Total FTA (Unweighted): 0.3306
+[2025-09-05 21:31:18] [Rank 0] Total FTA (Weighted): 0.3306
+[2025-09-05 21:31:18] [Rank 0] Group 0 Loss: 3.2806
+[2025-09-05 21:31:18] [Rank 0] Group 1 Loss: 2.9963
+[2025-09-05 21:31:18] [Rank 0] Group 2 Loss: 3.1916
+[2025-09-05 21:31:18] [Rank 0] Group 3 Loss: 3.5491
+[2025-09-05 21:31:18] [Rank 0] Group 4 Loss: 3.8409
+[2025-09-05 21:31:18] [Rank 0] Group 5 Loss: 4.2612
+[2025-09-05 21:31:18] [Rank 0] Group 6 Loss: 4.5215
+[2025-09-05 21:31:18] [Rank 0] Group 7 Loss: 4.6766
+[2025-09-05 21:31:18] [Rank 0] Group 8 Loss: 4.9918
+[2025-09-05 21:31:18] [Rank 0] Group 9 Loss: 5.1291
+[2025-09-05 21:31:18] [Rank 0] Group 10 Loss: 5.1684
+[2025-09-05 21:31:18] [Rank 0] Group 11 Loss: 5.2035
+[2025-09-05 21:31:18] [Rank 0] Group 12 Loss: 5.1758
+[2025-09-05 21:31:18] [Rank 0] Group 13 Loss: 5.1898
+[2025-09-05 21:31:18] [Rank 0] Group 14 Loss: 5.2384
+[2025-09-05 21:31:18] [Rank 0] Group 15 Loss: 5.1665
+[2025-09-05 21:31:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:31:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:31:18] [Rank 0] Group 2 FTA: 0.6400
+[2025-09-05 21:31:18] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:31:18] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:31:18] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 21:31:18] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:31:18] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 21:31:18] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:31:18] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 21:31:18] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 21:31:18] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 21:31:18] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 21:31:18] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 21:31:18] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 21:31:18] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 21:31:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:31:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:31:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:31:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:31:19] [Rank 0] step:8001/10000 train_time:335714ms step_avg:41.96ms
+[2025-09-05 21:31:20] [Rank 0] step:8021/10000 train_time:336598ms step_avg:41.96ms
+[2025-09-05 21:31:21] [Rank 0] step:8041/10000 train_time:337479ms step_avg:41.97ms
+[2025-09-05 21:31:22] [Rank 0] step:8061/10000 train_time:338217ms step_avg:41.96ms
+[2025-09-05 21:31:23] [Rank 0] step:8081/10000 train_time:338955ms step_avg:41.94ms
+[2025-09-05 21:31:23] [Rank 0] step:8101/10000 train_time:339693ms step_avg:41.93ms
+[2025-09-05 21:31:24] [Rank 0] step:8121/10000 train_time:340431ms step_avg:41.92ms
[Rank 0] step:8121/10000 train_time:340431ms step_avg:41.92ms +[2025-09-05 21:31:25] [Rank 0] step:8141/10000 train_time:341167ms step_avg:41.91ms +[2025-09-05 21:31:25] [Rank 0] step:8141/10000 train_time:341167ms step_avg:41.91ms +[2025-09-05 21:31:25] [Rank 0] step:8161/10000 train_time:341905ms step_avg:41.89ms +[2025-09-05 21:31:25] [Rank 0] step:8161/10000 train_time:341905ms step_avg:41.89ms +[2025-09-05 21:31:26] [Rank 0] step:8181/10000 train_time:342642ms step_avg:41.88ms +[2025-09-05 21:31:26] [Rank 0] step:8181/10000 train_time:342642ms step_avg:41.88ms +[2025-09-05 21:31:27] [Rank 0] step:8201/10000 train_time:343381ms step_avg:41.87ms +[2025-09-05 21:31:27] [Rank 0] step:8201/10000 train_time:343381ms step_avg:41.87ms +[2025-09-05 21:31:28] [Rank 0] step:8221/10000 train_time:344118ms step_avg:41.86ms +[2025-09-05 21:31:28] [Rank 0] step:8221/10000 train_time:344118ms step_avg:41.86ms +[2025-09-05 21:31:28] [Rank 0] step:8241/10000 train_time:344855ms step_avg:41.85ms +[2025-09-05 21:31:28] [Rank 0] step:8241/10000 train_time:344855ms step_avg:41.85ms +[2025-09-05 21:31:29] [Rank 0] step:8261/10000 train_time:345593ms step_avg:41.83ms +[2025-09-05 21:31:29] [Rank 0] step:8261/10000 train_time:345593ms step_avg:41.83ms +[2025-09-05 21:31:30] [Rank 0] step:8281/10000 train_time:346332ms step_avg:41.82ms +[2025-09-05 21:31:30] [Rank 0] step:8281/10000 train_time:346332ms step_avg:41.82ms +[2025-09-05 21:31:31] [Rank 0] step:8301/10000 train_time:347069ms step_avg:41.81ms +[2025-09-05 21:31:31] [Rank 0] step:8301/10000 train_time:347069ms step_avg:41.81ms +[2025-09-05 21:31:31] [Rank 0] step:8321/10000 train_time:347807ms step_avg:41.80ms +[2025-09-05 21:31:31] [Rank 0] step:8321/10000 train_time:347807ms step_avg:41.80ms +[2025-09-05 21:31:32] [Rank 0] step:8341/10000 train_time:348545ms step_avg:41.79ms +[2025-09-05 21:31:32] [Rank 0] step:8341/10000 train_time:348545ms step_avg:41.79ms +[2025-09-05 21:31:33] [Rank 0] step:8361/10000 train_time:349282ms step_avg:41.78ms +[2025-09-05 21:31:33] [Rank 0] step:8361/10000 train_time:349282ms step_avg:41.78ms +[2025-09-05 21:31:34] [Rank 0] step:8381/10000 train_time:350020ms step_avg:41.76ms +[2025-09-05 21:31:34] [Rank 0] step:8381/10000 train_time:350020ms step_avg:41.76ms +[2025-09-05 21:31:34] [Rank 0] step:8401/10000 train_time:350757ms step_avg:41.75ms +[2025-09-05 21:31:34] [Rank 0] step:8401/10000 train_time:350757ms step_avg:41.75ms +[2025-09-05 21:31:35] [Rank 0] step:8421/10000 train_time:351495ms step_avg:41.74ms +[2025-09-05 21:31:35] [Rank 0] step:8421/10000 train_time:351495ms step_avg:41.74ms +[2025-09-05 21:31:36] [Rank 0] step:8441/10000 train_time:352232ms step_avg:41.73ms +[2025-09-05 21:31:36] [Rank 0] step:8441/10000 train_time:352232ms step_avg:41.73ms +[2025-09-05 21:31:37] [Rank 0] step:8461/10000 train_time:352969ms step_avg:41.72ms +[2025-09-05 21:31:37] [Rank 0] step:8461/10000 train_time:352969ms step_avg:41.72ms +[2025-09-05 21:31:37] [Rank 0] step:8481/10000 train_time:353708ms step_avg:41.71ms +[2025-09-05 21:31:37] [Rank 0] step:8481/10000 train_time:353708ms step_avg:41.71ms +[2025-09-05 21:31:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 21:31:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
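The warning above repeats before every validation pass: 491520 = 7.5 x 65536, so an evaluation loop that floors the division runs seven full batches and never touches the remaining 32768 tokens. A minimal sketch of that arithmetic (variable names assumed for illustration, not taken from the script):

val_tokens = 491520       # from config.json
val_batch_size = 65536    # assumed: world_size * val_seq_len, e.g. 4 * 16384
full_batches, leftover = divmod(val_tokens, val_batch_size)
print(full_batches, leftover)  # 7 full batches evaluated, 32768 tokens skipped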
+[2025-09-05 21:31:38] [Rank 0] PRINT: step:8500/10000 train_loss:2.0775 val_loss:2.0626 train_time:354526ms step_avg:41.71ms
+[2025-09-05 21:31:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:31:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:32:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:32:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:32:59] [Rank 0] Total Loss: 4.4697
+[2025-09-05 21:32:59] [Rank 0] Total FTA (Unweighted): 0.3344
+[2025-09-05 21:32:59] [Rank 0] Total FTA (Weighted): 0.3344
+[2025-09-05 21:33:00] [Rank 0] Group 0 Loss: 3.2686
+[2025-09-05 21:33:00] [Rank 0] Group 1 Loss: 3.0517
+[2025-09-05 21:33:00] [Rank 0] Group 2 Loss: 3.1881
+[2025-09-05 21:33:00] [Rank 0] Group 3 Loss: 3.5329
+[2025-09-05 21:33:00] [Rank 0] Group 4 Loss: 3.8423
+[2025-09-05 21:33:00] [Rank 0] Group 5 Loss: 4.2320
+[2025-09-05 21:33:00] [Rank 0] Group 6 Loss: 4.5174
+[2025-09-05 21:33:00] [Rank 0] Group 7 Loss: 4.6758
+[2025-09-05 21:33:00] [Rank 0] Group 8 Loss: 4.9973
+[2025-09-05 21:33:00] [Rank 0] Group 9 Loss: 5.1280
+[2025-09-05 21:33:00] [Rank 0] Group 10 Loss: 5.1672
+[2025-09-05 21:33:00] [Rank 0] Group 11 Loss: 5.1808
+[2025-09-05 21:33:00] [Rank 0] Group 12 Loss: 5.1642
+[2025-09-05 21:33:00] [Rank 0] Group 13 Loss: 5.1778
+[2025-09-05 21:33:00] [Rank 0] Group 14 Loss: 5.2343
+[2025-09-05 21:33:00] [Rank 0] Group 15 Loss: 5.1574
+[2025-09-05 21:33:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:33:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:33:00] [Rank 0] Group 2 FTA: 0.7000
+[2025-09-05 21:33:00] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:33:00] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:33:00] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 21:33:00] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:33:00] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 21:33:00] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:33:00] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 21:33:00] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 21:33:00] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 21:33:00] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 21:33:00] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 21:33:00] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 21:33:00] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 21:33:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:33:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:33:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:33:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:33:02] [Rank 0] step:8501/10000 train_time:354536ms step_avg:41.71ms
+[2025-09-05 21:33:03] [Rank 0] step:8521/10000 train_time:355203ms step_avg:41.69ms
+[2025-09-05 21:33:03] [Rank 0] step:8541/10000 train_time:355941ms step_avg:41.67ms
+[2025-09-05 21:33:04] [Rank 0] step:8561/10000 train_time:356679ms step_avg:41.66ms
+[2025-09-05 21:33:05] [Rank 0] step:8581/10000 train_time:357418ms step_avg:41.65ms
+[2025-09-05 21:33:06] [Rank 0] step:8601/10000 train_time:358155ms step_avg:41.64ms
+[2025-09-05 21:33:06] [Rank 0] step:8621/10000 train_time:358894ms step_avg:41.63ms
+[2025-09-05 21:33:07] [Rank 0] step:8641/10000 train_time:359636ms step_avg:41.62ms
+[2025-09-05 21:33:08] [Rank 0] step:8661/10000 train_time:360373ms step_avg:41.61ms
+[2025-09-05 21:33:08] [Rank 0] step:8681/10000 train_time:361110ms step_avg:41.60ms
+[2025-09-05 21:33:09] [Rank 0] step:8701/10000 train_time:361847ms step_avg:41.59ms
+[2025-09-05 21:33:10] [Rank 0] step:8721/10000 train_time:362583ms step_avg:41.58ms
+[2025-09-05 21:33:11] [Rank 0] step:8741/10000 train_time:363320ms step_avg:41.57ms
+[2025-09-05 21:33:11] [Rank 0] step:8761/10000 train_time:364058ms step_avg:41.55ms
+[2025-09-05 21:33:12] [Rank 0] step:8781/10000 train_time:364797ms step_avg:41.54ms
+[2025-09-05 21:33:13] [Rank 0] step:8801/10000 train_time:365534ms step_avg:41.53ms
+[2025-09-05 21:33:14] [Rank 0] step:8821/10000 train_time:366275ms step_avg:41.52ms
+[2025-09-05 21:33:15] [Rank 0] step:8841/10000 train_time:367622ms step_avg:41.58ms
+[2025-09-05 21:33:16] [Rank 0] step:8861/10000 train_time:368360ms step_avg:41.57ms
+[2025-09-05 21:33:16] [Rank 0] step:8881/10000 train_time:369098ms step_avg:41.56ms
+[2025-09-05 21:33:17] [Rank 0] step:8901/10000 train_time:369837ms step_avg:41.55ms
+[2025-09-05 21:33:18] [Rank 0] step:8921/10000 train_time:370576ms step_avg:41.54ms
+[2025-09-05 21:33:19] [Rank 0] step:8941/10000 train_time:371314ms step_avg:41.53ms
+[2025-09-05 21:33:19] [Rank 0] step:8961/10000 train_time:372052ms step_avg:41.52ms
+[2025-09-05 21:33:20] [Rank 0] step:8981/10000 train_time:372790ms step_avg:41.51ms
+[2025-09-05 21:33:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
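step_avg in these timing lines is cumulative rather than windowed: it is the total train_time divided by the current step number, which is why it only drifts by a hundredth of a millisecond from line to line. A quick check against the step 8981 line above (the formula is inferred from the log values, not quoted from the script):

step, train_time_ms = 8981, 372790
print(f"step:{step}/10000 train_time:{train_time_ms}ms step_avg:{train_time_ms / step:.2f}ms")
# -> step:8981/10000 train_time:372790ms step_avg:41.51ms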
+[2025-09-05 21:33:21] [Rank 0] PRINT: step:9000/10000 train_loss:2.0646 val_loss:2.0507 train_time:373611ms step_avg:41.51ms
+[2025-09-05 21:33:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:33:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:34:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:34:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:34:43] [Rank 0] Total Loss: 4.4691
+[2025-09-05 21:34:43] [Rank 0] Total FTA (Unweighted): 0.3475
+[2025-09-05 21:34:43] [Rank 0] Total FTA (Weighted): 0.3475
+[2025-09-05 21:34:43] [Rank 0] Group 0 Loss: 3.2728
+[2025-09-05 21:34:43] [Rank 0] Group 1 Loss: 3.0980
+[2025-09-05 21:34:43] [Rank 0] Group 2 Loss: 3.1912
+[2025-09-05 21:34:43] [Rank 0] Group 3 Loss: 3.5456
+[2025-09-05 21:34:43] [Rank 0] Group 4 Loss: 3.8385
+[2025-09-05 21:34:43] [Rank 0] Group 5 Loss: 4.2349
+[2025-09-05 21:34:43] [Rank 0] Group 6 Loss: 4.5211
+[2025-09-05 21:34:43] [Rank 0] Group 7 Loss: 4.6602
+[2025-09-05 21:34:43] [Rank 0] Group 8 Loss: 4.9963
+[2025-09-05 21:34:43] [Rank 0] Group 9 Loss: 5.1222
+[2025-09-05 21:34:43] [Rank 0] Group 10 Loss: 5.1471
+[2025-09-05 21:34:43] [Rank 0] Group 11 Loss: 5.1757
+[2025-09-05 21:34:43] [Rank 0] Group 12 Loss: 5.1534
+[2025-09-05 21:34:43] [Rank 0] Group 13 Loss: 5.1858
+[2025-09-05 21:34:43] [Rank 0] Group 14 Loss: 5.2288
+[2025-09-05 21:34:43] [Rank 0] Group 15 Loss: 5.1344
+[2025-09-05 21:34:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:34:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:34:43] [Rank 0] Group 2 FTA: 0.8800
+[2025-09-05 21:34:43] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:34:43] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:34:43] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 21:34:43] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:34:43] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 21:34:43] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:34:43] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 21:34:43] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-05 21:34:43] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 21:34:43] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 21:34:43] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 21:34:43] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 21:34:43] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 21:34:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:34:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:34:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:34:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:34:44] [Rank 0] step:9001/10000 train_time:373620ms step_avg:41.51ms
+[2025-09-05 21:34:45] [Rank 0] step:9021/10000 train_time:374285ms step_avg:41.49ms
+[2025-09-05 21:34:46] [Rank 0] step:9041/10000 train_time:375023ms step_avg:41.48ms
+[2025-09-05 21:34:46] [Rank 0] step:9061/10000 train_time:375761ms step_avg:41.47ms
+[2025-09-05 21:34:47] [Rank 0] step:9081/10000 train_time:376499ms step_avg:41.46ms
+[2025-09-05 21:34:48] [Rank 0] step:9101/10000 train_time:377237ms step_avg:41.45ms
+[2025-09-05 21:34:49] [Rank 0] step:9121/10000 train_time:377974ms step_avg:41.44ms
+[2025-09-05 21:34:49] [Rank 0] step:9141/10000 train_time:378713ms step_avg:41.43ms
+[2025-09-05 21:34:50] [Rank 0] step:9161/10000 train_time:379452ms step_avg:41.42ms
+[2025-09-05 21:34:51] [Rank 0] step:9181/10000 train_time:380189ms step_avg:41.41ms
+[2025-09-05 21:34:52] [Rank 0] step:9201/10000 train_time:380927ms step_avg:41.40ms
+[2025-09-05 21:34:52] [Rank 0] step:9221/10000 train_time:381666ms step_avg:41.39ms
+[2025-09-05 21:34:53] [Rank 0] step:9241/10000 train_time:382404ms step_avg:41.38ms
+[2025-09-05 21:34:54] [Rank 0] step:9261/10000 train_time:383141ms step_avg:41.37ms
+[2025-09-05 21:34:55] [Rank 0] step:9281/10000 train_time:383880ms step_avg:41.36ms
+[2025-09-05 21:34:55] [Rank 0] step:9301/10000 train_time:384618ms step_avg:41.35ms
+[2025-09-05 21:34:56] [Rank 0] step:9321/10000 train_time:385355ms step_avg:41.34ms
+[2025-09-05 21:34:57] [Rank 0] step:9341/10000 train_time:386092ms step_avg:41.33ms
+[2025-09-05 21:34:57] [Rank 0] step:9361/10000 train_time:386830ms step_avg:41.32ms
+[2025-09-05 21:34:58] [Rank 0] step:9381/10000 train_time:387567ms step_avg:41.31ms
+[2025-09-05 21:34:59] [Rank 0] step:9401/10000 train_time:388306ms step_avg:41.30ms
+[2025-09-05 21:35:00] [Rank 0] step:9421/10000 train_time:389043ms step_avg:41.30ms
+[2025-09-05 21:35:00] [Rank 0] step:9441/10000 train_time:389781ms step_avg:41.29ms
+[2025-09-05 21:35:01] [Rank 0] step:9461/10000 train_time:390517ms step_avg:41.28ms
+[2025-09-05 21:35:02] [Rank 0] step:9481/10000 train_time:391255ms step_avg:41.27ms
+[2025-09-05 21:35:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
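Each detailed evaluation scores a fixed set of per_group_k = 100 samples for every one of the 16 groups, which is where the "Fixed-eval set loaded with 1600 samples" line and the 0.01 granularity of the Group N FTA values come from. It also explains why Total FTA (Unweighted) and Total FTA (Weighted) are always identical in these logs: with equal group sizes, the plain mean of the 16 group accuracies equals the sample-weighted pooled accuracy. A small sketch of that aggregation, assuming per-sample hit/miss results are available (the arrays below are placeholders):

import numpy as np

per_group_hits = {g: np.zeros(100, dtype=bool) for g in range(16)}  # placeholder results
group_fta = {g: hits.mean() for g, hits in per_group_hits.items()}
unweighted = sum(group_fta.values()) / len(group_fta)
weighted = sum(h.sum() for h in per_group_hits.values()) / sum(h.size for h in per_group_hits.values())
assert abs(unweighted - weighted) < 1e-12  # identical because every group contributes 100 samples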
+[2025-09-05 21:35:03] [Rank 0] PRINT: step:9500/10000 train_loss:2.0537 val_loss:2.0419 train_time:392075ms step_avg:41.27ms
+[2025-09-05 21:35:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:35:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:36:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:36:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:36:24] [Rank 0] Total Loss: 4.4684
+[2025-09-05 21:36:24] [Rank 0] Total FTA (Unweighted): 0.3481
+[2025-09-05 21:36:24] [Rank 0] Total FTA (Weighted): 0.3481
+[2025-09-05 21:36:24] [Rank 0] Group 0 Loss: 3.2927
+[2025-09-05 21:36:24] [Rank 0] Group 1 Loss: 3.1056
+[2025-09-05 21:36:24] [Rank 0] Group 2 Loss: 3.2157
+[2025-09-05 21:36:24] [Rank 0] Group 3 Loss: 3.5439
+[2025-09-05 21:36:24] [Rank 0] Group 4 Loss: 3.8415
+[2025-09-05 21:36:24] [Rank 0] Group 5 Loss: 4.2313
+[2025-09-05 21:36:24] [Rank 0] Group 6 Loss: 4.5053
+[2025-09-05 21:36:24] [Rank 0] Group 7 Loss: 4.6562
+[2025-09-05 21:36:24] [Rank 0] Group 8 Loss: 4.9896
+[2025-09-05 21:36:24] [Rank 0] Group 9 Loss: 5.1184
+[2025-09-05 21:36:24] [Rank 0] Group 10 Loss: 5.1537
+[2025-09-05 21:36:24] [Rank 0] Group 11 Loss: 5.1836
+[2025-09-05 21:36:24] [Rank 0] Group 12 Loss: 5.1480
+[2025-09-05 21:36:24] [Rank 0] Group 13 Loss: 5.1741
+[2025-09-05 21:36:24] [Rank 0] Group 14 Loss: 5.2146
+[2025-09-05 21:36:24] [Rank 0] Group 15 Loss: 5.1206
+[2025-09-05 21:36:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:36:24] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:36:24] [Rank 0] Group 2 FTA: 0.8000
+[2025-09-05 21:36:24] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:36:24] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:36:24] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 21:36:24] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:36:24] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 21:36:24] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:36:24] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 21:36:24] [Rank 0] Group 10 FTA: 0.2500
+[2025-09-05 21:36:24] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 21:36:24] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 21:36:24] [Rank 0] Group 13 FTA: 0.2600
+[2025-09-05 21:36:24] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 21:36:24] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 21:36:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:36:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:36:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:36:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:36:25] [Rank 0] step:9501/10000 train_time:392085ms step_avg:41.27ms
+[2025-09-05 21:36:26] [Rank 0] step:9521/10000 train_time:392750ms step_avg:41.25ms
+[2025-09-05 21:36:27] [Rank 0] step:9541/10000 train_time:393489ms step_avg:41.24ms
+[2025-09-05 21:36:27] [Rank 0] step:9561/10000 train_time:394228ms step_avg:41.23ms
+[2025-09-05 21:36:28] [Rank 0] step:9581/10000 train_time:394966ms step_avg:41.22ms
+[2025-09-05 21:36:29] [Rank 0] step:9601/10000 train_time:395704ms step_avg:41.21ms
+[2025-09-05 21:36:30] [Rank 0] step:9621/10000 train_time:396441ms step_avg:41.21ms
+[2025-09-05 21:36:30] [Rank 0] step:9641/10000 train_time:397178ms step_avg:41.20ms
+[2025-09-05 21:36:31] [Rank 0] step:9661/10000 train_time:398191ms step_avg:41.22ms
+[2025-09-05 21:36:32] [Rank 0] step:9681/10000 train_time:398929ms step_avg:41.21ms
+[2025-09-05 21:36:33] [Rank 0] step:9701/10000 train_time:399668ms step_avg:41.20ms
+[2025-09-05 21:36:34] [Rank 0] step:9721/10000 train_time:400406ms step_avg:41.19ms
+[2025-09-05 21:36:35] [Rank 0] step:9741/10000 train_time:401286ms step_avg:41.20ms
+[2025-09-05 21:36:35] [Rank 0] step:9761/10000 train_time:402024ms step_avg:41.19ms
+[2025-09-05 21:36:36] [Rank 0] step:9781/10000 train_time:402762ms step_avg:41.18ms
+[2025-09-05 21:36:37] [Rank 0] step:9801/10000 train_time:403634ms step_avg:41.18ms
+[2025-09-05 21:36:38] [Rank 0] step:9821/10000 train_time:404415ms step_avg:41.18ms
+[2025-09-05 21:36:38] [Rank 0] step:9841/10000 train_time:405154ms step_avg:41.17ms
+[2025-09-05 21:36:39] [Rank 0] step:9861/10000 train_time:405893ms step_avg:41.16ms
+[2025-09-05 21:36:40] [Rank 0] step:9881/10000 train_time:406630ms step_avg:41.15ms
+[2025-09-05 21:36:41] [Rank 0] step:9901/10000 train_time:407369ms step_avg:41.14ms
+[2025-09-05 21:36:41] [Rank 0] step:9921/10000 train_time:408108ms step_avg:41.14ms
+[2025-09-05 21:36:42] [Rank 0] step:9941/10000 train_time:408846ms step_avg:41.13ms
+[2025-09-05 21:36:43] [Rank 0] step:9961/10000 train_time:409584ms step_avg:41.12ms
+[2025-09-05 21:36:44] [Rank 0] step:9981/10000 train_time:410322ms step_avg:41.11ms
+[2025-09-05 21:36:44] [Rank 0] step:10000/10000 train_time:411025ms step_avg:41.10ms
+[2025-09-05 21:36:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
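The cadence of the whole log follows the config: a timing line roughly every 20 steps, and a full val_loss pass plus detailed evaluation every 500 steps (val_loss_every = 500), after which the four curve PNGs are rewritten. Schematically, with placeholder functions standing in for the script's real work (this loop is inferred from the log, not copied from the script):

def train_one_step(): pass                    # placeholders for illustration only
def log_timing(step): pass
def run_validation_and_detailed_eval(step): pass

num_iterations = 10000                        # from config.json
for step in range(1, num_iterations + 1):
    train_one_step()
    if step % 20 == 1 or step == num_iterations:   # the step:9501, 9521, ..., 10000 lines
        log_timing(step)
    if step % 500 == 0:                            # val_loss_every = 500
        run_validation_and_detailed_eval(step)     # val_loss line, per-group Loss/FTA, PNG updates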
+[2025-09-05 21:36:45] [Rank 0] PRINT: step:10000/10000 train_loss:2.0456 val_loss:2.0343 train_time:411149ms step_avg:41.11ms
+[2025-09-05 21:36:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:36:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:38:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:38:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:38:06] [Rank 0] Total Loss: 4.4500
+[2025-09-05 21:38:06] [Rank 0] Total FTA (Unweighted): 0.3550
+[2025-09-05 21:38:06] [Rank 0] Total FTA (Weighted): 0.3550
+[2025-09-05 21:38:06] [Rank 0] Group 0 Loss: 3.2679
+[2025-09-05 21:38:06] [Rank 0] Group 1 Loss: 3.0634
+[2025-09-05 21:38:06] [Rank 0] Group 2 Loss: 3.1815
+[2025-09-05 21:38:06] [Rank 0] Group 3 Loss: 3.5355
+[2025-09-05 21:38:06] [Rank 0] Group 4 Loss: 3.8257
+[2025-09-05 21:38:06] [Rank 0] Group 5 Loss: 4.2133
+[2025-09-05 21:38:06] [Rank 0] Group 6 Loss: 4.4844
+[2025-09-05 21:38:06] [Rank 0] Group 7 Loss: 4.6352
+[2025-09-05 21:38:06] [Rank 0] Group 8 Loss: 4.9777
+[2025-09-05 21:38:06] [Rank 0] Group 9 Loss: 5.1009
+[2025-09-05 21:38:06] [Rank 0] Group 10 Loss: 5.1373
+[2025-09-05 21:38:06] [Rank 0] Group 11 Loss: 5.1618
+[2025-09-05 21:38:06] [Rank 0] Group 12 Loss: 5.1331
+[2025-09-05 21:38:06] [Rank 0] Group 13 Loss: 5.1554
+[2025-09-05 21:38:06] [Rank 0] Group 14 Loss: 5.2099
+[2025-09-05 21:38:06] [Rank 0] Group 15 Loss: 5.1170
+[2025-09-05 21:38:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:38:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:38:06] [Rank 0] Group 2 FTA: 0.8800
+[2025-09-05 21:38:06] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:38:06] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:38:06] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 21:38:06] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 21:38:06] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 21:38:06] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 21:38:06] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 21:38:06] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-05 21:38:06] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 21:38:06] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 21:38:06] [Rank 0] Group 13 FTA: 0.2600
+[2025-09-05 21:38:06] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 21:38:06] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 21:38:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_loss_curves.png
+[2025-09-05 21:38:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/per_class_acc_curves.png
+[2025-09-05 21:38:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_loss_curve.png
+[2025-09-05 21:38:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_43/total_acc_curve.png
+[2025-09-05 21:38:07] [Rank 0] step:10001/10000 train_time:411158ms step_avg:41.11ms
+[2025-09-05 21:38:07] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 21:38:07 2025 ---
+[2025-09-05 21:38:07] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..56f8cc8eba0b8bbfbc20396243d2fb6e1217ef09
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/config.json
@@ -0,0 +1,29 @@
+{ + "cli_args": { + "unet": false, + "seed": 
44, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.1, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "1ba80d26-3400-4d71-94ed-8fc0c0ee7433", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..10963c57fa46ae480f7f3c06077247fc623713b7 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2f790f6c212b6f54404e9179a6ecffc5e044ccd1ed6ed5a327d7cac9eed3206 +size 322778 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..7e2f02109e29ee4ce677bb95bf6423fa9c81573d --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:096ff87a3dd78a299f4e8c6effa46fbd52f83fc90bfa5ecf9be148b7f6fbbb87 +size 420977 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..0876c9881c831cf7e711c0fae04f993baa963fa9 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bf4e1f543eae8458e027756affad5103af8d467bde509148562d413dc5d7f78 +size 91554 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..86e34cff6ac870819280100be310415b673144a2 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8f3fad91e0d86d28453fcb383600e51ac12d5fa9fc9e0be9d87f7dbb62afc43 +size 114171 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/training_log_1ba80d26-3400-4d71-94ed-8fc0c0ee7433.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/training_log_1ba80d26-3400-4d71-94ed-8fc0c0ee7433.txt new file mode 100644 index 0000000000000000000000000000000000000000..bdd71175003ce16e0d85d1ccf9bdd50f2f8c6a39 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/training_log_1ba80d26-3400-4d71-94ed-8fc0c0ee7433.txt @@ -0,0 +1,5614 @@ +[2025-09-05 21:38:33] [Rank 0] PRINT: --- Script Start: Fri Sep 5 21:38:33 2025 --- +[2025-09-05 21:38:33] [Rank 0] PRINT: --- Script Start: Fri Sep 5 21:38:33 2025 --- +[2025-09-05 21:38:33] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=44, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 21:38:33] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 21:38:33] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 21:38:33] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 21:38:33] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-05 21:38:33] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-05 21:38:33] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44 +[2025-09-05 21:38:33] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44 +[2025-09-05 21:38:33] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
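+                         # [Editor's note] The help text above stops at mode 8, but the
+                         # dispatch below also implements mode 9 (pure SGD+momentum, the
+                         # mode used in this run), 10 (Muon on W_O + MLP), 13 (Muon on
+                         # W_O + W_2; its runtime banner misprints "Mode 32"),
+                         # 14 (Muon on W_O), 15 (Muon on W_V), and 16 (Muon on QKV;
+                         # its banner misprints "Mode 15").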
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
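+# [Editor's note - illustrative, not from the original run] RANK, LOCAL_RANK and
+# WORLD_SIZE are read from the environment above, so they are normally injected by
+# the launcher; without one, the defaults give a single-process run. A hypothetical
+# launch line matching this log's CLI args would be:
+#   torchrun --standalone --nproc_per_node=<num_gpus> train_script.py \
+#       --optimizer_mode 9 --model_parameterization gated --sgd_lr 0.1 --seed 44 \
+#       --base_dir logs_qa_sgd_gated/lr_search_long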
for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
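+# [Editor's note - sketch, not part of the original run] print0 above writes each
+# message to the logfile twice (two identical `with open(logfile, "a")` blocks),
+# which is why every entry in this training log appears duplicated. A deduplicated
+# variant over the same globals, under a hypothetical name to avoid shadowing:
+def print0_once(s, console=False):
+    if master_process:
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+        if console or s.startswith("PRINT:"):
+            print(s[6:] if s.startswith("PRINT:") else s)  # strip the "PRINT:" tag
+        if logfile:
+            with open(logfile, "a") as f:  # single write per call
+                f.write(log_message + "\n")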
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
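+    # [Editor's note - worked example, assuming m=3 for brevity] The class_to_group_map
+    # used in the loop below comes from generate_powerlaw_selection_counts above,
+    # which for m=3 gives
+    #   selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+    #   class_groups     = [0, 1, 2, 2, 3, 3, 3, 3]
+    # Group g >= 1 holds 2**(g-1) classes with 2**(m-g) samples each, so each such
+    # group contributes 2**(m-1) samples in total, while group 0 contributes 2**m.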
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
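+    # [Editor's note - illustrative numbers, not from this run] Why two totals are
+    # computed below: with group A at 90/100 correct and group B at 1/10 correct,
+    #   weighted   = (90 + 1) / (100 + 10) ~= 0.827  (dominated by the larger group)
+    #   unweighted = (0.90 + 0.10) / 2      = 0.500  (each group counts equally)
+    # The fixed eval set samples up to per_group_k items per group, which keeps the
+    # group sizes comparable and the two estimates close.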
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
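+    Illustrative note (editor's addition, assumed numbers): with num_samples=5000
+    over a 20000-line file, sample_ratio = 0.25 and each class keeps
+    max(1, int(0.25 * len(items))) items, so single-item tail classes survive the
+    stratified sampling below.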
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
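+                # [Editor's note] save_checkpoint is False in Hyperparameters above,
+                # so in this run the whole checkpoint-saving branch never executes;
+                # set save_checkpoint = True to write a checkpoint at each eval step.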
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 21:38:33] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
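+# Worked example (illustrative; not emitted by the original run): for m = 3 the
+# helper above returns
+#   selection_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#   class_groups     == [0, 1, 2, 2, 3, 3, 3, 3]
+# i.e. group 0 is a single head class with 2**m samples, and each group g >= 1
+# holds 2**(g-1) classes with 2**(m-g) samples apiece, so every group g >= 1
+# carries the same total budget of 2**(m-1) samples. This run uses m = 15.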
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
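+    # Minimal sanity check (a sketch, not present in the original script): for any
+    # of the split modes above, the Muon and Adam matrix lists should partition the
+    # hidden matrices, which could be asserted right after the dispatch as
+    #     muon_ids = {id(p) for p in muon_params_target_list}
+    #     adam_ids = {id(p) for p in adam_matrix_target_list}
+    #     assert muon_ids.isdisjoint(adam_ids), "param assigned to both Muon and Adam"
+    #     assert muon_ids | adam_ids <= {id(p) for p in all_attn_matrices + all_mlp_matrices}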
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # Use exp_args.muon_lr directly: the local muon_lr alias is only defined in the "qkvo" branch
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()  # computed but not printed
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 21:38:33] [Rank 0] PRINT: Constructing model...
+[2025-09-05 21:38:35] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 21:38:35] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 21:38:35] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 21:38:39] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 21:38:39] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 21:38:39] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 21:38:39] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 21:38:39] [Rank 0] PRINT: Model returns:
+[2025-09-05 21:38:39] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 21:38:39] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 21:38:39] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.1).
+[2025-09-05 21:38:39] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 21:38:39] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 21:38:44] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 21:38:44] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 21:39:25] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 21:39:25] [Rank 0] PRINT: Starting training...
+[2025-09-05 21:39:31] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/fixed_eval_indices.json
+[2025-09-05 21:39:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:39:35] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 21:40:09] [Rank 0] step:21/10000 train_time:34168ms step_avg:1627.03ms
+[2025-09-05 21:40:10] [Rank 0] step:41/10000 train_time:34896ms step_avg:851.12ms
+[2025-09-05 21:40:10] [Rank 0] step:61/10000 train_time:35623ms step_avg:583.99ms
+[2025-09-05 21:40:11] [Rank 0] step:81/10000 train_time:36350ms step_avg:448.77ms
+[2025-09-05 21:40:12] [Rank 0] step:101/10000 train_time:37081ms step_avg:367.14ms
+[2025-09-05 21:40:13] [Rank 0] step:121/10000 train_time:37809ms step_avg:312.47ms
+[2025-09-05 21:40:13] [Rank 0] step:141/10000 train_time:38536ms step_avg:273.30ms
+[2025-09-05 21:40:14] [Rank 0] step:161/10000 train_time:39263ms step_avg:243.87ms
+[2025-09-05 21:40:15] [Rank 0] step:181/10000 train_time:39991ms step_avg:220.94ms
+[2025-09-05 21:40:16] [Rank 0] step:201/10000 train_time:40717ms step_avg:202.57ms
+[2025-09-05 21:40:16] [Rank 0] step:221/10000 train_time:41444ms step_avg:187.53ms
+[2025-09-05 21:40:17] [Rank 0] step:241/10000 train_time:42170ms step_avg:174.98ms
+[2025-09-05 21:40:18] [Rank 0] step:261/10000 train_time:42898ms step_avg:164.36ms
+[2025-09-05 21:40:18] [Rank 0] step:281/10000 train_time:43625ms step_avg:155.25ms
+[2025-09-05 21:40:19] [Rank 0] step:301/10000 train_time:44353ms step_avg:147.35ms
+[2025-09-05 21:40:20] [Rank 0] step:321/10000 train_time:45080ms step_avg:140.44ms
+[2025-09-05 21:40:21] [Rank 0] step:341/10000 train_time:45806ms step_avg:134.33ms
+[2025-09-05 21:40:21] [Rank 0] step:361/10000 train_time:46532ms step_avg:128.90ms
+[2025-09-05 21:40:22] [Rank 0] step:381/10000 train_time:47260ms step_avg:124.04ms
+[2025-09-05 21:40:23] [Rank 0] step:401/10000 train_time:47987ms step_avg:119.67ms
+[2025-09-05 21:40:24] [Rank 0] step:421/10000 train_time:48714ms step_avg:115.71ms
+[2025-09-05 21:40:24] [Rank 0] step:441/10000 train_time:49442ms step_avg:112.11ms
+[2025-09-05 21:40:25] [Rank 0] step:461/10000 train_time:50169ms step_avg:108.83ms
+[2025-09-05 21:40:26] [Rank 0] step:481/10000 train_time:50899ms step_avg:105.82ms
+[2025-09-05 21:40:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:40:27] [Rank 0] PRINT: step:500/10000 train_loss:5.6949 val_loss:4.0680 train_time:51705ms step_avg:103.41ms
+[2025-09-05 21:40:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:40:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:41:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:41:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:41:49] [Rank 0] Total Loss: 5.8770
+[2025-09-05 21:41:49] [Rank 0] Total FTA (Unweighted): 0.0819
+[2025-09-05 21:41:49] [Rank 0] Total FTA (Weighted): 0.0819
+[2025-09-05 21:41:49] [Rank 0] Group 0 Loss: 3.6326
+[2025-09-05 21:41:49] [Rank 0] Group 1 Loss: 3.6526
+[2025-09-05 21:41:49] [Rank 0] Group 2 Loss: 4.4720
+[2025-09-05 21:41:49] [Rank 0] Group 3 Loss: 5.2847
+[2025-09-05 21:41:49] [Rank 0] Group 4 Loss: 6.0577
+[2025-09-05 21:41:49] [Rank 0] Group 5 Loss: 6.2192
+[2025-09-05 21:41:49] [Rank 0] Group 6 Loss: 6.3110
+[2025-09-05 21:41:49] [Rank 0] Group 7 Loss: 6.3011
+[2025-09-05 21:41:49] [Rank 0] Group 8 Loss: 6.4620
+[2025-09-05 21:41:49] [Rank 0] Group 9 Loss: 6.5826
+[2025-09-05 21:41:49] [Rank 0] Group 10 Loss: 6.5456
+[2025-09-05 21:41:49] [Rank 0] Group 11 Loss: 6.6167
+[2025-09-05 21:41:49] [Rank 0] Group 12 Loss: 6.4524
+[2025-09-05 21:41:49] [Rank 0] Group 13 Loss: 6.4333
+[2025-09-05 21:41:49] [Rank 0] Group 14 Loss: 6.5495
+[2025-09-05 21:41:49] [Rank 0] Group 15 Loss: 6.4592
+[2025-09-05 21:41:49] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 21:41:49] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 21:41:49] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-05 21:41:49] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-05 21:41:49] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-05 21:41:49] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-05 21:41:49] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 21:41:49] [Rank 0] Group 7 FTA: 0.0700
+[2025-09-05 21:41:49] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-05 21:41:49] [Rank 0] Group 9 FTA: 0.0600
+[2025-09-05 21:41:49] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-05 21:41:49] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 21:41:49] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 21:41:49] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 21:41:49] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:41:49] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 21:41:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:41:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:41:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:41:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:41:51] [Rank 0] step:501/10000 train_time:51715ms step_avg:103.22ms
+[2025-09-05 21:41:52] [Rank 0] step:521/10000 train_time:52371ms step_avg:100.52ms
+[2025-09-05 21:41:53] [Rank 0] step:541/10000 train_time:53096ms step_avg:98.14ms
+[2025-09-05 21:41:53] [Rank 0] step:561/10000 train_time:53972ms step_avg:96.21ms
+[2025-09-05 21:41:54] [Rank 0] step:581/10000 train_time:54699ms step_avg:94.15ms
+[2025-09-05 21:41:55] [Rank 0] step:601/10000 train_time:55426ms step_avg:92.22ms
+[2025-09-05 21:41:56] [Rank 0] step:621/10000 train_time:56154ms step_avg:90.42ms
+[2025-09-05 21:41:56] [Rank 0] step:641/10000 train_time:56880ms step_avg:88.74ms
+[2025-09-05 21:41:57] [Rank 0] step:661/10000 train_time:57605ms step_avg:87.15ms
+[2025-09-05 21:41:58] [Rank 0] step:681/10000 train_time:58332ms step_avg:85.66ms
+[2025-09-05 21:41:59] [Rank 0] step:701/10000 train_time:59059ms step_avg:84.25ms
+[2025-09-05 21:41:59] [Rank 0] step:721/10000 train_time:59786ms step_avg:82.92ms
+[2025-09-05 21:42:00] [Rank 0] step:741/10000 train_time:60513ms step_avg:81.66ms
+[2025-09-05 21:42:01] [Rank 0] step:761/10000 train_time:61244ms step_avg:80.48ms
+[2025-09-05 21:42:01] [Rank 0] step:781/10000 train_time:61975ms step_avg:79.35ms
+[2025-09-05 21:42:02] [Rank 0] step:801/10000 train_time:62708ms step_avg:78.29ms
+[2025-09-05 21:42:04] [Rank 0] step:821/10000 train_time:64044ms step_avg:78.01ms
+[2025-09-05 21:42:04] [Rank 0] step:841/10000 train_time:64777ms step_avg:77.02ms
+[2025-09-05 21:42:05] [Rank 0] step:861/10000 train_time:65509ms step_avg:76.09ms
+[2025-09-05 21:42:06] [Rank 0] step:881/10000 train_time:66241ms step_avg:75.19ms
+[2025-09-05 21:42:06] [Rank 0] step:901/10000 train_time:66974ms step_avg:74.33ms
+[2025-09-05 21:42:07] [Rank 0] step:921/10000 train_time:67707ms step_avg:73.51ms
+[2025-09-05 21:42:08] [Rank 0] step:941/10000 train_time:68439ms step_avg:72.73ms
+[2025-09-05 21:42:09] [Rank 0] step:961/10000 train_time:69171ms step_avg:71.98ms
+[2025-09-05 21:42:09] [Rank 0] step:981/10000 train_time:69902ms step_avg:71.26ms
+[2025-09-05 21:42:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:42:11] [Rank 0] PRINT: step:1000/10000 train_loss:3.6207 val_loss:3.2890 train_time:70714ms step_avg:70.71ms
+[2025-09-05 21:42:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:42:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:43:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:43:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:43:32] [Rank 0] Total Loss: 5.4175
+[2025-09-05 21:43:32] [Rank 0] Total FTA (Unweighted): 0.1269
+[2025-09-05 21:43:32] [Rank 0] Total FTA (Weighted): 0.1269
+[2025-09-05 21:43:32] [Rank 0] Group 0 Loss: 3.3080
+[2025-09-05 21:43:32] [Rank 0] Group 1 Loss: 3.3001
+[2025-09-05 21:43:32] [Rank 0] Group 2 Loss: 3.7486
+[2025-09-05 21:43:32] [Rank 0] Group 3 Loss: 4.3327
+[2025-09-05 21:43:32] [Rank 0] Group 4 Loss: 5.2572
+[2025-09-05 21:43:32] [Rank 0] Group 5 Loss: 5.6185
+[2025-09-05 21:43:32] [Rank 0] Group 6 Loss: 5.8498
+[2025-09-05 21:43:32] [Rank 0] Group 7 Loss: 5.8951
+[2025-09-05 21:43:32] [Rank 0] Group 8 Loss: 6.0919
+[2025-09-05 21:43:32] [Rank 0] Group 9 Loss: 6.2481
+[2025-09-05 21:43:32] [Rank 0] Group 10 Loss: 6.2002
+[2025-09-05 21:43:32] [Rank 0] Group 11 Loss: 6.2744
+[2025-09-05 21:43:32] [Rank 0] Group 12 Loss: 6.1075
+[2025-09-05 21:43:32] [Rank 0] Group 13 Loss: 6.1062
+[2025-09-05 21:43:32] [Rank 0] Group 14 Loss: 6.2122
+[2025-09-05 21:43:32] [Rank 0] Group 15 Loss: 6.1292
+[2025-09-05 21:43:32] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 21:43:32] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 21:43:32] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:43:32] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 21:43:32] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 21:43:32] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-05 21:43:32] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-05 21:43:32] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 21:43:32] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-05 21:43:32] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 21:43:32] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 21:43:32] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 21:43:32] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 21:43:32] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 21:43:32] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 21:43:32] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 21:43:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:43:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:43:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:43:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:43:34] [Rank 0] step:1001/10000 train_time:70724ms step_avg:70.65ms
+[2025-09-05 21:43:35] [Rank 0] step:1021/10000 train_time:71402ms step_avg:69.93ms
+[2025-09-05 21:43:36] [Rank 0] step:1041/10000 train_time:72135ms step_avg:69.29ms
+[2025-09-05 21:43:36] [Rank 0] step:1061/10000 train_time:72867ms step_avg:68.68ms
+[2025-09-05 21:43:37] [Rank 0] step:1081/10000 train_time:73600ms step_avg:68.08ms
+[2025-09-05 21:43:38] [Rank 0] step:1101/10000 train_time:74333ms step_avg:67.51ms
+[2025-09-05 21:43:39] [Rank 0] step:1121/10000 train_time:75066ms step_avg:66.96ms
+[2025-09-05 21:43:39] [Rank 0] step:1141/10000 train_time:75798ms step_avg:66.43ms
+[2025-09-05 21:43:40] [Rank 0] step:1161/10000 train_time:76530ms step_avg:65.92ms
+[2025-09-05 21:43:41] [Rank 0] step:1181/10000 train_time:77262ms step_avg:65.42ms
+[2025-09-05 21:43:41] [Rank 0] step:1201/10000 train_time:77994ms step_avg:64.94ms
+[2025-09-05 21:43:42] [Rank 0] step:1221/10000 train_time:78727ms step_avg:64.48ms
+[2025-09-05 21:43:43] [Rank 0] step:1241/10000 train_time:79460ms step_avg:64.03ms
+[2025-09-05 21:43:44] [Rank 0] step:1261/10000 train_time:80192ms step_avg:63.59ms
+[2025-09-05 21:43:44] [Rank 0] step:1281/10000 train_time:80923ms step_avg:63.17ms
+[2025-09-05 21:43:45] [Rank 0] step:1301/10000 train_time:81656ms step_avg:62.76ms
+[2025-09-05 21:43:46] [Rank 0] step:1321/10000 train_time:82392ms step_avg:62.37ms
+[2025-09-05 21:43:47] [Rank 0] step:1341/10000 train_time:83124ms step_avg:61.99ms
+[2025-09-05 21:43:47] [Rank 0] step:1361/10000 train_time:83857ms step_avg:61.61ms
+[2025-09-05 21:43:48] [Rank 0] step:1381/10000 train_time:84589ms step_avg:61.25ms
+[2025-09-05 21:43:49] [Rank 0] step:1401/10000 train_time:85324ms step_avg:60.90ms
+[2025-09-05 21:43:50] [Rank 0] step:1421/10000 train_time:86056ms step_avg:60.56ms
+[2025-09-05 21:43:50] [Rank 0] step:1441/10000 train_time:86788ms step_avg:60.23ms
+[2025-09-05 21:43:51] [Rank 0] step:1461/10000 train_time:87521ms step_avg:59.90ms
+[2025-09-05 21:43:52] [Rank 0] step:1481/10000 train_time:88253ms step_avg:59.59ms
+[2025-09-05 21:43:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:43:53] [Rank 0] PRINT: step:1500/10000 train_loss:3.1070 val_loss:2.9504 train_time:89066ms step_avg:59.38ms
+[2025-09-05 21:43:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:43:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:45:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:45:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:45:15] [Rank 0] Total Loss: 5.1159
+[2025-09-05 21:45:15] [Rank 0] Total FTA (Unweighted): 0.1625
+[2025-09-05 21:45:15] [Rank 0] Total FTA (Weighted): 0.1625
+[2025-09-05 21:45:15] [Rank 0] Group 0 Loss: 3.2053
+[2025-09-05 21:45:15] [Rank 0] Group 1 Loss: 3.1834
+[2025-09-05 21:45:15] [Rank 0] Group 2 Loss: 3.4664
+[2025-09-05 21:45:15] [Rank 0] Group 3 Loss: 3.9176
+[2025-09-05 21:45:15] [Rank 0] Group 4 Loss: 4.7091
+[2025-09-05 21:45:15] [Rank 0] Group 5 Loss: 5.1542
+[2025-09-05 21:45:15] [Rank 0] Group 6 Loss: 5.4895
+[2025-09-05 21:45:15] [Rank 0] Group 7 Loss: 5.5521
+[2025-09-05 21:45:15] [Rank 0] Group 8 Loss: 5.7840
+[2025-09-05 21:45:15] [Rank 0] Group 9 Loss: 5.9722
+[2025-09-05 21:45:15] [Rank 0] Group 10 Loss: 5.9160
+[2025-09-05 21:45:15] [Rank 0] Group 11 Loss: 5.9938
+[2025-09-05 21:45:15] [Rank 0] Group 12 Loss: 5.8565
+[2025-09-05 21:45:15] [Rank 0] Group 13 Loss: 5.8618
+[2025-09-05 21:45:15] [Rank 0] Group 14 Loss: 5.9186
+[2025-09-05 21:45:15] [Rank 0] Group 15 Loss: 5.8736
+[2025-09-05 21:45:15] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-05 21:45:15] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 21:45:15] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:45:15] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 21:45:15] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 21:45:15] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 21:45:15] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 21:45:15] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-05 21:45:15] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 21:45:15] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 21:45:15] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 21:45:15] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 21:45:15] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 21:45:15] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 21:45:15] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:45:15] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 21:45:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:45:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:45:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:45:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:45:16] [Rank 0] step:1501/10000 train_time:89075ms step_avg:59.34ms
+[2025-09-05 21:45:17] [Rank 0] step:1521/10000 train_time:89738ms step_avg:59.00ms
+[2025-09-05 21:45:18] [Rank 0] step:1541/10000 train_time:90470ms step_avg:58.71ms
+[2025-09-05 21:45:18] [Rank 0] step:1561/10000 train_time:91202ms step_avg:58.43ms
+[2025-09-05 21:45:19] [Rank 0] step:1581/10000 train_time:91934ms step_avg:58.15ms
+[2025-09-05 21:45:20] [Rank 0] step:1601/10000 train_time:92666ms step_avg:57.88ms
+[2025-09-05 21:45:21] [Rank 0] step:1621/10000 train_time:93399ms step_avg:57.62ms
+[2025-09-05 21:45:22] [Rank 0] step:1641/10000 train_time:94766ms step_avg:57.75ms
+[2025-09-05 21:45:23] [Rank 0] step:1661/10000 train_time:95499ms step_avg:57.50ms
+[2025-09-05 21:45:23] [Rank 0] step:1681/10000 train_time:96232ms step_avg:57.25ms
+[2025-09-05 21:45:24] [Rank 0] step:1701/10000 train_time:96968ms step_avg:57.01ms
+[2025-09-05 21:45:25] [Rank 0] step:1721/10000 train_time:97701ms step_avg:56.77ms
+[2025-09-05 21:45:26] [Rank 0] step:1741/10000 train_time:98434ms step_avg:56.54ms
+[2025-09-05 21:45:26] [Rank 0] step:1761/10000 train_time:99167ms step_avg:56.31ms
+[2025-09-05 21:45:27] [Rank 0] step:1781/10000 train_time:99901ms step_avg:56.09ms
+[2025-09-05 21:45:28] [Rank 0] step:1801/10000 train_time:100634ms step_avg:55.88ms
+[2025-09-05 21:45:29] [Rank 0] step:1821/10000 train_time:101366ms step_avg:55.66ms
+[2025-09-05 21:45:29] [Rank 0] step:1841/10000 train_time:102096ms step_avg:55.46ms
+[2025-09-05 21:45:30] [Rank 0] step:1861/10000 train_time:102828ms step_avg:55.25ms
+[2025-09-05 21:45:31] [Rank 0] step:1881/10000 train_time:103558ms step_avg:55.05ms
+[2025-09-05 21:45:31] [Rank 0] step:1901/10000 train_time:104290ms step_avg:54.86ms
+[2025-09-05 21:45:32] [Rank 0] step:1921/10000 train_time:105025ms step_avg:54.67ms
+[2025-09-05 21:45:33] [Rank 0] step:1941/10000 train_time:105757ms step_avg:54.49ms
+[2025-09-05 21:45:34] [Rank 0] step:1961/10000 train_time:106490ms step_avg:54.30ms
+[2025-09-05 21:45:34] [Rank 0] step:1981/10000 train_time:107222ms step_avg:54.12ms
+[2025-09-05 21:45:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:45:36] [Rank 0] PRINT: step:2000/10000 train_loss:2.8317 val_loss:2.7160 train_time:108034ms step_avg:54.02ms
+[2025-09-05 21:45:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:45:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:46:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:46:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:46:58] [Rank 0] Total Loss: 5.0354
+[2025-09-05 21:46:58] [Rank 0] Total FTA (Unweighted): 0.1663
+[2025-09-05 21:46:58] [Rank 0] Total FTA (Weighted): 0.1663
+[2025-09-05 21:46:58] [Rank 0] Group 0 Loss: 3.2701
+[2025-09-05 21:46:58] [Rank 0] Group 1 Loss: 3.2536
+[2025-09-05 21:46:58] [Rank 0] Group 2 Loss: 3.5056
+[2025-09-05 21:46:58] [Rank 0] Group 3 Loss: 3.8485
+[2025-09-05 21:46:58] [Rank 0] Group 4 Loss: 4.5050
+[2025-09-05 21:46:58] [Rank 0] Group 5 Loss: 5.0170
+[2025-09-05 21:46:58] [Rank 0] Group 6 Loss: 5.3178
+[2025-09-05 21:46:58] [Rank 0] Group 7 Loss: 5.4330
+[2025-09-05 21:46:58] [Rank 0] Group 8 Loss: 5.6734
+[2025-09-05 21:46:58] [Rank 0] Group 9 Loss: 5.8296
+[2025-09-05 21:46:58] [Rank 0] Group 10 Loss: 5.8316
+[2025-09-05 21:46:58] [Rank 0] Group 11 Loss: 5.8938
+[2025-09-05 21:46:58] [Rank 0] Group 12 Loss: 5.7679
+[2025-09-05 21:46:58] [Rank 0] Group 13 Loss: 5.7822
+[2025-09-05 21:46:58] [Rank 0] Group 14 Loss: 5.8542
+[2025-09-05 21:46:58] [Rank 0] Group 15 Loss: 5.7824
+[2025-09-05 21:46:58] [Rank 0] Group 0 FTA: 0.8000
+[2025-09-05 21:46:58] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 21:46:58] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:46:58] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 21:46:58] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 21:46:58] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 21:46:58] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 21:46:58] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 21:46:58] [Rank 0] Group 8 FTA: 0.1800
+[2025-09-05 21:46:58] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 21:46:58] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 21:46:58] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 21:46:58] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 21:46:58] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 21:46:58] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:46:58] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 21:46:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:46:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:46:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:46:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:46:59] [Rank 0] step:2001/10000 train_time:108044ms step_avg:53.99ms
+[2025-09-05 21:47:00] [Rank 0] step:2021/10000 train_time:108912ms step_avg:53.89ms
+[2025-09-05 21:47:01] [Rank 0] step:2041/10000 train_time:109644ms step_avg:53.72ms
+[2025-09-05 21:47:02] [Rank 0] step:2061/10000 train_time:110376ms step_avg:53.55ms
+[2025-09-05 21:47:02] [Rank 0] step:2081/10000 train_time:111107ms step_avg:53.39ms
+[2025-09-05 21:47:03] [Rank 0] step:2101/10000 train_time:111839ms step_avg:53.23ms
+[2025-09-05 21:47:04] [Rank 0] step:2121/10000 train_time:112572ms step_avg:53.07ms
+[2025-09-05 21:47:05] [Rank 0] step:2141/10000 train_time:113304ms step_avg:52.92ms
+[2025-09-05 21:47:05] [Rank 0] step:2161/10000 train_time:114036ms step_avg:52.77ms
+[2025-09-05 21:47:06] [Rank 0] step:2181/10000 train_time:114771ms step_avg:52.62ms
+[2025-09-05 21:47:07] [Rank 0] step:2201/10000 train_time:115620ms step_avg:52.53ms
+[2025-09-05 21:47:08] [Rank 0] step:2221/10000 train_time:116353ms step_avg:52.39ms
+[2025-09-05 21:47:08] [Rank 0] step:2241/10000 train_time:117089ms step_avg:52.25ms
+[2025-09-05 21:47:09] [Rank 0] step:2261/10000 train_time:117977ms step_avg:52.18ms
+[2025-09-05 21:47:10] [Rank 0] step:2281/10000 train_time:118716ms step_avg:52.05ms
+[2025-09-05 21:47:11] [Rank 0] step:2301/10000 train_time:119454ms step_avg:51.91ms
+[2025-09-05 21:47:12] [Rank 0] step:2321/10000 train_time:120193ms step_avg:51.79ms
+[2025-09-05 21:47:12] [Rank 0] step:2341/10000 train_time:120932ms step_avg:51.66ms
+[2025-09-05 21:47:13] [Rank 0] step:2361/10000 train_time:121671ms step_avg:51.53ms
+[2025-09-05 21:47:14] [Rank 0] step:2381/10000 train_time:122410ms step_avg:51.41ms
+[2025-09-05 21:47:15] [Rank 0] step:2401/10000 train_time:123148ms step_avg:51.29ms
+[2025-09-05 21:47:15] [Rank 0] step:2421/10000 train_time:123888ms step_avg:51.17ms
+[2025-09-05 21:47:16] [Rank 0] step:2441/10000 train_time:124626ms step_avg:51.06ms
+[2025-09-05 21:47:17] [Rank 0] step:2461/10000 train_time:125365ms step_avg:50.94ms
+[2025-09-05 21:47:17] [Rank 0] step:2481/10000 train_time:126104ms step_avg:50.83ms
+[2025-09-05 21:47:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:47:19] [Rank 0] PRINT: step:2500/10000 train_loss:2.6437 val_loss:2.5547 train_time:126924ms step_avg:50.77ms
+[2025-09-05 21:47:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:47:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:48:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:48:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:48:41] [Rank 0] Total Loss: 4.8492
+[2025-09-05 21:48:41] [Rank 0] Total FTA (Unweighted): 0.2094
+[2025-09-05 21:48:41] [Rank 0] Total FTA (Weighted): 0.2094
+[2025-09-05 21:48:41] [Rank 0] Group 0 Loss: 3.1919
+[2025-09-05 21:48:41] [Rank 0] Group 1 Loss: 3.1657
+[2025-09-05 21:48:41] [Rank 0] Group 2 Loss: 3.3018
+[2025-09-05 21:48:41] [Rank 0] Group 3 Loss: 3.6876
+[2025-09-05 21:48:41] [Rank 0] Group 4 Loss: 4.2153
+[2025-09-05 21:48:41] [Rank 0] Group 5 Loss: 4.7737
+[2025-09-05 21:48:41] [Rank 0] Group 6 Loss: 5.0628
+[2025-09-05 21:48:41] [Rank 0] Group 7 Loss: 5.2052
+[2025-09-05 21:48:41] [Rank 0] Group 8 Loss: 5.4864
+[2025-09-05 21:48:41] [Rank 0] Group 9 Loss: 5.6123
+[2025-09-05 21:48:41] [Rank 0] Group 10 Loss: 5.6696
+[2025-09-05 21:48:41] [Rank 0] Group 11 Loss: 5.7125
+[2025-09-05 21:48:41] [Rank 0] Group 12 Loss: 5.5839
+[2025-09-05 21:48:41] [Rank 0] Group 13 Loss: 5.6284
+[2025-09-05 21:48:41] [Rank 0] Group 14 Loss: 5.6763
+[2025-09-05 21:48:41] [Rank 0] Group 15 Loss: 5.6134
+[2025-09-05 21:48:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:48:41] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-05 21:48:41] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:48:41] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:48:41] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-05 21:48:41] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 21:48:41] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-05 21:48:41] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-05 21:48:41] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-05 21:48:41] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 21:48:41] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 21:48:41] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 21:48:41] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 21:48:41] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 21:48:41] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 21:48:41] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 21:48:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:48:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:48:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:48:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:48:42] [Rank 0] step:2501/10000 train_time:126934ms step_avg:50.75ms
+[2025-09-05 21:48:43] [Rank 0] step:2521/10000 train_time:127618ms step_avg:50.62ms
+[2025-09-05 21:48:44] [Rank 0] step:2541/10000 train_time:128356ms step_avg:50.51ms
+[2025-09-05 21:48:45] [Rank 0] step:2561/10000 train_time:129096ms step_avg:50.41ms
+[2025-09-05 21:48:45] [Rank 0] step:2581/10000 train_time:129835ms step_avg:50.30ms
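The divisibility warning above is plain arithmetic: 491520 / 65536 = 7.5, so a validation loop that only consumes whole batches scores 7 × 65536 = 458752 tokens and silently skips the remaining 32768. A minimal sketch of that check (variable names mirror the log fields, not the actual training script):

```python
# Sketch of the arithmetic behind the recurring warning; names follow the
# log fields, not the real validation loop.
val_tokens = 491520
val_batch_size = 65536

full_batches, remainder = divmod(val_tokens, val_batch_size)
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
print(full_batches * val_batch_size)  # 458752 of the 491520 tokens actually scored
```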
+[2025-09-05 21:48:46] [Rank 0] step:2601/10000 train_time:130573ms step_avg:50.20ms
+[2025-09-05 21:48:47] [Rank 0] step:2621/10000 train_time:131312ms step_avg:50.10ms
+[2025-09-05 21:48:48] [Rank 0] step:2641/10000 train_time:132050ms step_avg:50.00ms
+[2025-09-05 21:48:48] [Rank 0] step:2661/10000 train_time:132789ms step_avg:49.90ms
+[2025-09-05 21:48:49] [Rank 0] step:2681/10000 train_time:133528ms step_avg:49.81ms
+[2025-09-05 21:48:50] [Rank 0] step:2701/10000 train_time:134266ms step_avg:49.71ms
+[2025-09-05 21:48:51] [Rank 0] step:2721/10000 train_time:135011ms step_avg:49.62ms
+[2025-09-05 21:48:51] [Rank 0] step:2741/10000 train_time:135749ms step_avg:49.53ms
+[2025-09-05 21:48:52] [Rank 0] step:2761/10000 train_time:136487ms step_avg:49.43ms
+[2025-09-05 21:48:53] [Rank 0] step:2781/10000 train_time:137225ms step_avg:49.34ms
+[2025-09-05 21:48:54] [Rank 0] step:2801/10000 train_time:137964ms step_avg:49.26ms
+[2025-09-05 21:48:55] [Rank 0] step:2821/10000 train_time:139311ms step_avg:49.38ms
+[2025-09-05 21:48:56] [Rank 0] step:2841/10000 train_time:140048ms step_avg:49.30ms
+[2025-09-05 21:48:56] [Rank 0] step:2861/10000 train_time:140788ms step_avg:49.21ms
+[2025-09-05 21:48:57] [Rank 0] step:2881/10000 train_time:141526ms step_avg:49.12ms
+[2025-09-05 21:48:58] [Rank 0] step:2901/10000 train_time:142265ms step_avg:49.04ms
+[2025-09-05 21:48:59] [Rank 0] step:2921/10000 train_time:143004ms step_avg:48.96ms
+[2025-09-05 21:48:59] [Rank 0] step:2941/10000 train_time:143742ms step_avg:48.88ms
+[2025-09-05 21:49:00] [Rank 0] step:2961/10000 train_time:144482ms step_avg:48.79ms
+[2025-09-05 21:49:01] [Rank 0] step:2981/10000 train_time:145221ms step_avg:48.72ms
+[2025-09-05 21:49:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
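The step_avg printed in these lines is cumulative, train_time divided by step, so it smooths over occasional slow windows such as the jump at step 2821. Differencing consecutive entries recovers the true per-window cost; a quick sketch with values taken from the lines above:

```python
# step_avg in the log is cumulative (train_time / step). Differencing two
# consecutive entries gives the per-window cost instead.
def step_stats(step, time_ms, prev_step, prev_time_ms):
    cumulative_avg = time_ms / step                          # what the log prints
    window_avg = (time_ms - prev_time_ms) / (step - prev_step)
    return round(cumulative_avg, 2), round(window_avg, 2)

print(step_stats(2801, 137964, 2781, 137225))  # (49.26, 36.95): steady state
print(step_stats(2821, 139311, 2801, 137964))  # (49.38, 67.35): the slow window
```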
+[2025-09-05 21:49:02] [Rank 0] PRINT: step:3000/10000 train_loss:2.5026 val_loss:2.4372 train_time:146041ms step_avg:48.68ms
+[2025-09-05 21:49:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:49:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:50:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:50:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:50:24] [Rank 0] Total Loss: 4.8113
+[2025-09-05 21:50:24] [Rank 0] Total FTA (Unweighted): 0.2250
+[2025-09-05 21:50:24] [Rank 0] Total FTA (Weighted): 0.2250
+[2025-09-05 21:50:24] [Rank 0] Group 0 Loss: 3.2420
+[2025-09-05 21:50:24] [Rank 0] Group 1 Loss: 3.2584
+[2025-09-05 21:50:24] [Rank 0] Group 2 Loss: 3.4285
+[2025-09-05 21:50:24] [Rank 0] Group 3 Loss: 3.6605
+[2025-09-05 21:50:24] [Rank 0] Group 4 Loss: 4.1497
+[2025-09-05 21:50:24] [Rank 0] Group 5 Loss: 4.6677
+[2025-09-05 21:50:24] [Rank 0] Group 6 Loss: 4.9700
+[2025-09-05 21:50:24] [Rank 0] Group 7 Loss: 5.1146
+[2025-09-05 21:50:24] [Rank 0] Group 8 Loss: 5.3978
+[2025-09-05 21:50:24] [Rank 0] Group 9 Loss: 5.5574
+[2025-09-05 21:50:24] [Rank 0] Group 10 Loss: 5.6090
+[2025-09-05 21:50:24] [Rank 0] Group 11 Loss: 5.6444
+[2025-09-05 21:50:24] [Rank 0] Group 12 Loss: 5.5228
+[2025-09-05 21:50:24] [Rank 0] Group 13 Loss: 5.5751
+[2025-09-05 21:50:24] [Rank 0] Group 14 Loss: 5.6153
+[2025-09-05 21:50:24] [Rank 0] Group 15 Loss: 5.5675
+[2025-09-05 21:50:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:50:24] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-05 21:50:24] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 21:50:24] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:50:24] [Rank 0] Group 4 FTA: 0.2100
+[2025-09-05 21:50:24] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-05 21:50:24] [Rank 0] Group 6 FTA: 0.1800
+[2025-09-05 21:50:24] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 21:50:24] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 21:50:24] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 21:50:24] [Rank 0] Group 10 FTA: 0.1500
+[2025-09-05 21:50:24] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 21:50:24] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 21:50:24] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 21:50:24] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 21:50:24] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 21:50:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:50:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:50:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:50:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:50:26] [Rank 0] step:3001/10000 train_time:146050ms step_avg:48.67ms
+[2025-09-05 21:50:26] [Rank 0] step:3021/10000 train_time:146713ms step_avg:48.56ms
+[2025-09-05 21:50:27] [Rank 0] step:3041/10000 train_time:147452ms step_avg:48.49ms
+[2025-09-05 21:50:28] [Rank 0] step:3061/10000 train_time:148190ms step_avg:48.41ms
+[2025-09-05 21:50:29] [Rank 0] step:3081/10000 train_time:148928ms step_avg:48.34ms
+[2025-09-05 21:50:29] [Rank 0] step:3101/10000 train_time:149667ms step_avg:48.26ms
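Both FTA totals in that block equal the plain mean of the 16 per-group values, which is what a sample-weighted mean reduces to when the 1600 fixed-eval samples are split evenly across groups (each group FTA moves in increments of 0.01, consistent with 100 samples per group). A check against the step-3000 numbers, under that equal-split assumption:

```python
# Relation between the two logged FTA totals, assuming an even
# 100-samples-per-group split of the 1600-sample fixed-eval set.
group_fta = [1.00, 0.52, 0.18, 0.17, 0.21, 0.20, 0.18, 0.11,
             0.21, 0.13, 0.15, 0.13, 0.09, 0.11, 0.12, 0.09]
group_size = [100] * 16  # 1600 samples / 16 groups (assumption)

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_size)) / sum(group_size)
print(round(unweighted, 4), round(weighted, 4))  # 0.225 0.225, the logged 0.2250
```

The 0.2688 versus 0.2687 pair at step 3500 below is consistent with the same underlying value, 0.26875, simply being rounded differently by the two reporting paths.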
+[2025-09-05 21:50:30] [Rank 0] step:3121/10000 train_time:150405ms step_avg:48.19ms
+[2025-09-05 21:50:31] [Rank 0] step:3141/10000 train_time:151144ms step_avg:48.12ms
+[2025-09-05 21:50:31] [Rank 0] step:3161/10000 train_time:151882ms step_avg:48.05ms
+[2025-09-05 21:50:32] [Rank 0] step:3181/10000 train_time:152621ms step_avg:47.98ms
+[2025-09-05 21:50:33] [Rank 0] step:3201/10000 train_time:153358ms step_avg:47.91ms
+[2025-09-05 21:50:34] [Rank 0] step:3221/10000 train_time:154096ms step_avg:47.84ms
+[2025-09-05 21:50:34] [Rank 0] step:3241/10000 train_time:154834ms step_avg:47.77ms
+[2025-09-05 21:50:35] [Rank 0] step:3261/10000 train_time:155573ms step_avg:47.71ms
+[2025-09-05 21:50:36] [Rank 0] step:3281/10000 train_time:156312ms step_avg:47.64ms
+[2025-09-05 21:50:37] [Rank 0] step:3301/10000 train_time:157049ms step_avg:47.58ms
+[2025-09-05 21:50:37] [Rank 0] step:3321/10000 train_time:157787ms step_avg:47.51ms
+[2025-09-05 21:50:38] [Rank 0] step:3341/10000 train_time:158525ms step_avg:47.45ms
+[2025-09-05 21:50:39] [Rank 0] step:3361/10000 train_time:159264ms step_avg:47.39ms
+[2025-09-05 21:50:40] [Rank 0] step:3381/10000 train_time:160002ms step_avg:47.32ms
+[2025-09-05 21:50:40] [Rank 0] step:3401/10000 train_time:160741ms step_avg:47.26ms
+[2025-09-05 21:50:41] [Rank 0] step:3421/10000 train_time:161479ms step_avg:47.20ms
+[2025-09-05 21:50:42] [Rank 0] step:3441/10000 train_time:162217ms step_avg:47.14ms
+[2025-09-05 21:50:43] [Rank 0] step:3461/10000 train_time:162955ms step_avg:47.08ms
+[2025-09-05 21:50:43] [Rank 0] step:3481/10000 train_time:163694ms step_avg:47.03ms
+[2025-09-05 21:50:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:50:44] [Rank 0] PRINT: step:3500/10000 train_loss:2.3990 val_loss:2.3461 train_time:164513ms step_avg:47.00ms
+[2025-09-05 21:50:44] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:50:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:52:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:52:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:52:06] [Rank 0] Total Loss: 4.7158
+[2025-09-05 21:52:06] [Rank 0] Total FTA (Unweighted): 0.2688
+[2025-09-05 21:52:06] [Rank 0] Total FTA (Weighted): 0.2687
+[2025-09-05 21:52:06] [Rank 0] Group 0 Loss: 3.2369
+[2025-09-05 21:52:06] [Rank 0] Group 1 Loss: 3.2669
+[2025-09-05 21:52:06] [Rank 0] Group 2 Loss: 3.2936
+[2025-09-05 21:52:06] [Rank 0] Group 3 Loss: 3.6121
+[2025-09-05 21:52:06] [Rank 0] Group 4 Loss: 4.0088
+[2025-09-05 21:52:06] [Rank 0] Group 5 Loss: 4.5617
+[2025-09-05 21:52:06] [Rank 0] Group 6 Loss: 4.8739
+[2025-09-05 21:52:06] [Rank 0] Group 7 Loss: 5.0003
+[2025-09-05 21:52:06] [Rank 0] Group 8 Loss: 5.2901
+[2025-09-05 21:52:06] [Rank 0] Group 9 Loss: 5.4346
+[2025-09-05 21:52:06] [Rank 0] Group 10 Loss: 5.4662
+[2025-09-05 21:52:06] [Rank 0] Group 11 Loss: 5.5384
+[2025-09-05 21:52:06] [Rank 0] Group 12 Loss: 5.4239
+[2025-09-05 21:52:06] [Rank 0] Group 13 Loss: 5.4822
+[2025-09-05 21:52:06] [Rank 0] Group 14 Loss: 5.4929
+[2025-09-05 21:52:06] [Rank 0] Group 15 Loss: 5.4710
+[2025-09-05 21:52:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:52:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:52:06] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 21:52:06] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:52:06] [Rank 0] Group 4 FTA: 0.2100
+[2025-09-05 21:52:06] [Rank 0] Group 5 FTA: 0.2200
+[2025-09-05 21:52:06] [Rank 0] Group 6 FTA: 0.2000
+[2025-09-05 21:52:06] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 21:52:06] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:52:06] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 21:52:06] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-05 21:52:06] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 21:52:06] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 21:52:06] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 21:52:06] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 21:52:06] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 21:52:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:52:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:52:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:52:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:52:07] [Rank 0] step:3501/10000 train_time:164522ms step_avg:46.99ms
+[2025-09-05 21:52:08] [Rank 0] step:3521/10000 train_time:165193ms step_avg:46.92ms
+[2025-09-05 21:52:09] [Rank 0] step:3541/10000 train_time:165932ms step_avg:46.86ms
+[2025-09-05 21:52:10] [Rank 0] step:3561/10000 train_time:166671ms step_avg:46.80ms
+[2025-09-05 21:52:10] [Rank 0] step:3581/10000 train_time:167409ms step_avg:46.75ms
+[2025-09-05 21:52:11] [Rank 0] step:3601/10000 train_time:168148ms step_avg:46.69ms
+[2025-09-05 21:52:12] [Rank 0] step:3621/10000 train_time:168886ms step_avg:46.64ms
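The four "[✓] ... curve updated and saved" lines show the evaluator redrawing its plots from accumulated history at every detailed eval. A minimal sketch of such an updater, assuming a {group_id: [(step, value), ...]} history; none of these names come from the actual script:

```python
# Hypothetical curve updater: append this step's per-group values to a
# history elsewhere, then redraw and overwrite the PNG each eval.
import matplotlib
matplotlib.use("Agg")  # headless save, as on a training node
import matplotlib.pyplot as plt

def save_per_class_curves(history, out_path):
    # history: {group_id: [(step, loss_or_fta), ...]}
    fig, ax = plt.subplots()
    for gid, points in sorted(history.items()):
        steps, values = zip(*points)
        ax.plot(steps, values, label=f"Group {gid}")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval metric")
    ax.legend(fontsize=6, ncol=2)
    fig.savefig(out_path, dpi=150)
    plt.close(fig)  # free the figure between evals
```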
+[2025-09-05 21:52:13] [Rank 0] step:3641/10000 train_time:170228ms step_avg:46.75ms
+[2025-09-05 21:52:14] [Rank 0] step:3661/10000 train_time:170966ms step_avg:46.70ms
+[2025-09-05 21:52:15] [Rank 0] step:3681/10000 train_time:171703ms step_avg:46.65ms
+[2025-09-05 21:52:15] [Rank 0] step:3701/10000 train_time:172441ms step_avg:46.59ms
+[2025-09-05 21:52:16] [Rank 0] step:3721/10000 train_time:173180ms step_avg:46.54ms
+[2025-09-05 21:52:17] [Rank 0] step:3741/10000 train_time:173919ms step_avg:46.49ms
+[2025-09-05 21:52:18] [Rank 0] step:3761/10000 train_time:174658ms step_avg:46.44ms
+[2025-09-05 21:52:18] [Rank 0] step:3781/10000 train_time:175396ms step_avg:46.39ms
+[2025-09-05 21:52:19] [Rank 0] step:3801/10000 train_time:176135ms step_avg:46.34ms
+[2025-09-05 21:52:20] [Rank 0] step:3821/10000 train_time:176875ms step_avg:46.29ms
+[2025-09-05 21:52:21] [Rank 0] step:3841/10000 train_time:177613ms step_avg:46.24ms
+[2025-09-05 21:52:21] [Rank 0] step:3861/10000 train_time:178351ms step_avg:46.19ms
+[2025-09-05 21:52:22] [Rank 0] step:3881/10000 train_time:179090ms step_avg:46.15ms
+[2025-09-05 21:52:23] [Rank 0] step:3901/10000 train_time:179829ms step_avg:46.10ms
+[2025-09-05 21:52:24] [Rank 0] step:3921/10000 train_time:180734ms step_avg:46.09ms
+[2025-09-05 21:52:24] [Rank 0] step:3941/10000 train_time:181473ms step_avg:46.05ms
+[2025-09-05 21:52:25] [Rank 0] step:3961/10000 train_time:182213ms step_avg:46.00ms
+[2025-09-05 21:52:26] [Rank 0] step:3981/10000 train_time:183087ms step_avg:45.99ms
+[2025-09-05 21:52:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
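Every periodic summary in this log follows the same "PRINT: step:..." format, so the checkpointed losses are easy to scrape back out of the file. A small parser written against that line format (the file name in the usage note is illustrative):

```python
# Scrape the periodic eval summaries back out of a log like this one.
import re

PATTERN = re.compile(
    r"PRINT: step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)"
)

def parse_summaries(lines):
    for line in lines:
        m = PATTERN.search(line)
        if m:
            step, train_loss, val_loss = m.groups()
            yield int(step), float(train_loss), float(val_loss)

# e.g. list(parse_summaries(open("training.log"))) ->
# [(2500, 2.6437, 2.5547), (3000, 2.5026, 2.4372), ...]
```

The plain per-step timing lines deliberately fall through, since they carry no loss fields for the pattern to capture.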
+[2025-09-05 21:52:27] [Rank 0] PRINT: step:4000/10000 train_loss:2.3217 val_loss:2.2798 train_time:183907ms step_avg:45.98ms
+[2025-09-05 21:52:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:52:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:53:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:53:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:53:49] [Rank 0] Total Loss: 4.6799
+[2025-09-05 21:53:49] [Rank 0] Total FTA (Unweighted): 0.2906
+[2025-09-05 21:53:49] [Rank 0] Total FTA (Weighted): 0.2906
+[2025-09-05 21:53:49] [Rank 0] Group 0 Loss: 3.2551
+[2025-09-05 21:53:49] [Rank 0] Group 1 Loss: 3.2343
+[2025-09-05 21:53:49] [Rank 0] Group 2 Loss: 3.3705
+[2025-09-05 21:53:49] [Rank 0] Group 3 Loss: 3.6103
+[2025-09-05 21:53:49] [Rank 0] Group 4 Loss: 3.9820
+[2025-09-05 21:53:49] [Rank 0] Group 5 Loss: 4.4791
+[2025-09-05 21:53:49] [Rank 0] Group 6 Loss: 4.7861
+[2025-09-05 21:53:49] [Rank 0] Group 7 Loss: 4.9276
+[2025-09-05 21:53:49] [Rank 0] Group 8 Loss: 5.2440
+[2025-09-05 21:53:49] [Rank 0] Group 9 Loss: 5.3864
+[2025-09-05 21:53:49] [Rank 0] Group 10 Loss: 5.4324
+[2025-09-05 21:53:49] [Rank 0] Group 11 Loss: 5.4923
+[2025-09-05 21:53:49] [Rank 0] Group 12 Loss: 5.3717
+[2025-09-05 21:53:49] [Rank 0] Group 13 Loss: 5.4397
+[2025-09-05 21:53:49] [Rank 0] Group 14 Loss: 5.4604
+[2025-09-05 21:53:49] [Rank 0] Group 15 Loss: 5.4067
+[2025-09-05 21:53:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:53:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:53:49] [Rank 0] Group 2 FTA: 0.4000
+[2025-09-05 21:53:49] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:53:49] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:53:49] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 21:53:49] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 21:53:49] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 21:53:49] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:53:49] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 21:53:49] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-05 21:53:49] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 21:53:49] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 21:53:49] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 21:53:49] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 21:53:49] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 21:53:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:53:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:53:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:53:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:53:50] [Rank 0] step:4001/10000 train_time:183916ms step_avg:45.97ms
+[2025-09-05 21:53:52] [Rank 0] step:4021/10000 train_time:185201ms step_avg:46.06ms
+[2025-09-05 21:53:52] [Rank 0] step:4041/10000 train_time:185940ms step_avg:46.01ms
+[2025-09-05 21:53:53] [Rank 0] step:4061/10000 train_time:186678ms step_avg:45.97ms
+[2025-09-05 21:53:54] [Rank 0] step:4081/10000 train_time:187417ms step_avg:45.92ms
+[2025-09-05 21:53:55] [Rank 0] step:4101/10000 train_time:188156ms step_avg:45.88ms
+[2025-09-05 21:53:55] [Rank 0] step:4121/10000 train_time:188895ms step_avg:45.84ms
+[2025-09-05 21:53:56] [Rank 0] step:4141/10000 train_time:189637ms step_avg:45.80ms
+[2025-09-05 21:53:57] [Rank 0] step:4161/10000 train_time:190376ms step_avg:45.75ms
+[2025-09-05 21:53:58] [Rank 0] step:4181/10000 train_time:191115ms step_avg:45.71ms
+[2025-09-05 21:53:58] [Rank 0] step:4201/10000 train_time:191854ms step_avg:45.67ms
+[2025-09-05 21:53:59] [Rank 0] step:4221/10000 train_time:192593ms step_avg:45.63ms
+[2025-09-05 21:54:00] [Rank 0] step:4241/10000 train_time:193332ms step_avg:45.59ms
+[2025-09-05 21:54:00] [Rank 0] step:4261/10000 train_time:194071ms step_avg:45.55ms
+[2025-09-05 21:54:01] [Rank 0] step:4281/10000 train_time:194809ms step_avg:45.51ms
+[2025-09-05 21:54:02] [Rank 0] step:4301/10000 train_time:195549ms step_avg:45.47ms
+[2025-09-05 21:54:03] [Rank 0] step:4321/10000 train_time:196288ms step_avg:45.43ms
+[2025-09-05 21:54:03] [Rank 0] step:4341/10000 train_time:197027ms step_avg:45.39ms
+[2025-09-05 21:54:04] [Rank 0] step:4361/10000 train_time:197767ms step_avg:45.35ms
+[2025-09-05 21:54:05] [Rank 0] step:4381/10000 train_time:198506ms step_avg:45.31ms
+[2025-09-05 21:54:06] [Rank 0] step:4401/10000 train_time:199245ms step_avg:45.27ms
+[2025-09-05 21:54:06] [Rank 0] step:4421/10000 train_time:199984ms step_avg:45.24ms
+[2025-09-05 21:54:07] [Rank 0] step:4441/10000 train_time:200723ms step_avg:45.20ms
+[2025-09-05 21:54:08] [Rank 0] step:4461/10000 train_time:201461ms step_avg:45.16ms
+[2025-09-05 21:54:09] [Rank 0] step:4481/10000 train_time:202200ms step_avg:45.12ms
+[2025-09-05 21:54:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:54:10] [Rank 0] PRINT: step:4500/10000 train_loss:2.2797 val_loss:2.2277 train_time:203020ms step_avg:45.12ms
+[2025-09-05 21:54:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:54:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:55:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:55:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:55:31] [Rank 0] Total Loss: 4.6201
+[2025-09-05 21:55:31] [Rank 0] Total FTA (Unweighted): 0.2863
+[2025-09-05 21:55:31] [Rank 0] Total FTA (Weighted): 0.2863
+[2025-09-05 21:55:31] [Rank 0] Group 0 Loss: 3.3442
+[2025-09-05 21:55:31] [Rank 0] Group 1 Loss: 3.2625
+[2025-09-05 21:55:31] [Rank 0] Group 2 Loss: 3.2671
+[2025-09-05 21:55:31] [Rank 0] Group 3 Loss: 3.5637
+[2025-09-05 21:55:31] [Rank 0] Group 4 Loss: 3.8977
+[2025-09-05 21:55:31] [Rank 0] Group 5 Loss: 4.4066
+[2025-09-05 21:55:31] [Rank 0] Group 6 Loss: 4.7221
+[2025-09-05 21:55:31] [Rank 0] Group 7 Loss: 4.8538
+[2025-09-05 21:55:31] [Rank 0] Group 8 Loss: 5.1628
+[2025-09-05 21:55:31] [Rank 0] Group 9 Loss: 5.3096
+[2025-09-05 21:55:31] [Rank 0] Group 10 Loss: 5.3555
+[2025-09-05 21:55:31] [Rank 0] Group 11 Loss: 5.4136
+[2025-09-05 21:55:31] [Rank 0] Group 12 Loss: 5.3106
+[2025-09-05 21:55:31] [Rank 0] Group 13 Loss: 5.3585
+[2025-09-05 21:55:31] [Rank 0] Group 14 Loss: 5.3646
+[2025-09-05 21:55:31] [Rank 0] Group 15 Loss: 5.3285
+[2025-09-05 21:55:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:55:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:55:31] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 21:55:31] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:55:31] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:55:31] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 21:55:31] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 21:55:31] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 21:55:31] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:55:31] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 21:55:31] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 21:55:31] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-05 21:55:31] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 21:55:31] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 21:55:31] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 21:55:31] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 21:55:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:55:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:55:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:55:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:55:33] [Rank 0] step:4501/10000 train_time:203029ms step_avg:45.11ms
+[2025-09-05 21:55:33] [Rank 0] step:4521/10000 train_time:203696ms step_avg:45.06ms
+[2025-09-05 21:55:34] [Rank 0] step:4541/10000 train_time:204435ms step_avg:45.02ms
+[2025-09-05 21:55:35] [Rank 0] step:4561/10000 train_time:205174ms step_avg:44.98ms
+[2025-09-05 21:55:36] [Rank 0] step:4581/10000 train_time:206134ms step_avg:45.00ms
+[2025-09-05 21:55:37] [Rank 0] step:4601/10000 train_time:206873ms step_avg:44.96ms
+[2025-09-05 21:55:37] [Rank 0] step:4621/10000 train_time:207611ms step_avg:44.93ms
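Each detailed evaluation reloads the same fixed 1600-sample set, which is what keeps the per-group numbers comparable from step to step. A hypothetical loader sketch: the JSON layout, group id mapped to sample indices, is an assumption, and only the sample count comes from the log:

```python
# Hypothetical fixed-eval loader. The {"group_id": [sample_index, ...]}
# layout is assumed; the 1600-sample total matches the logged message.
import json

def load_fixed_eval(path):
    with open(path) as f:
        groups = json.load(f)  # e.g. {"0": [idx, ...], "1": [...], ...}
    n = sum(len(v) for v in groups.values())
    print(f"Fixed-eval set loaded with {n} samples.")
    return {int(k): v for k, v in groups.items()}
```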
+[2025-09-05 21:55:38] [Rank 0] step:4641/10000 train_time:208349ms step_avg:44.89ms
+[2025-09-05 21:55:39] [Rank 0] step:4661/10000 train_time:209086ms step_avg:44.86ms
+[2025-09-05 21:55:40] [Rank 0] step:4681/10000 train_time:209825ms step_avg:44.82ms
+[2025-09-05 21:55:40] [Rank 0] step:4701/10000 train_time:210563ms step_avg:44.79ms
+[2025-09-05 21:55:41] [Rank 0] step:4721/10000 train_time:211302ms step_avg:44.76ms
+[2025-09-05 21:55:42] [Rank 0] step:4741/10000 train_time:212040ms step_avg:44.72ms
+[2025-09-05 21:55:42] [Rank 0] step:4761/10000 train_time:212778ms step_avg:44.69ms
+[2025-09-05 21:55:43] [Rank 0] step:4781/10000 train_time:213516ms step_avg:44.66ms
+[2025-09-05 21:55:44] [Rank 0] step:4801/10000 train_time:214255ms step_avg:44.63ms
+[2025-09-05 21:55:45] [Rank 0] step:4821/10000 train_time:214994ms step_avg:44.60ms
+[2025-09-05 21:55:46] [Rank 0] step:4841/10000 train_time:216041ms step_avg:44.63ms
+[2025-09-05 21:55:46] [Rank 0] step:4861/10000 train_time:216780ms step_avg:44.60ms
+[2025-09-05 21:55:47] [Rank 0] step:4881/10000 train_time:217519ms step_avg:44.56ms
+[2025-09-05 21:55:48] [Rank 0] step:4901/10000 train_time:218258ms step_avg:44.53ms
+[2025-09-05 21:55:49] [Rank 0] step:4921/10000 train_time:218996ms step_avg:44.50ms
+[2025-09-05 21:55:49] [Rank 0] step:4941/10000 train_time:219734ms step_avg:44.47ms
+[2025-09-05 21:55:50] [Rank 0] step:4961/10000 train_time:220473ms step_avg:44.44ms
+[2025-09-05 21:55:51] [Rank 0] step:4981/10000 train_time:221212ms step_avg:44.41ms
+[2025-09-05 21:55:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:55:52] [Rank 0] PRINT: step:5000/10000 train_loss:2.2130 val_loss:2.1847 train_time:222031ms step_avg:44.41ms
+[2025-09-05 21:55:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:55:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:57:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:57:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:57:14] [Rank 0] Total Loss: 4.5486
+[2025-09-05 21:57:14] [Rank 0] Total FTA (Unweighted): 0.2956
+[2025-09-05 21:57:15] [Rank 0] Total FTA (Weighted): 0.2956
+[2025-09-05 21:57:15] [Rank 0] Group 0 Loss: 3.3404
+[2025-09-05 21:57:15] [Rank 0] Group 1 Loss: 3.0824
+[2025-09-05 21:57:15] [Rank 0] Group 2 Loss: 3.1918
+[2025-09-05 21:57:15] [Rank 0] Group 3 Loss: 3.5404
+[2025-09-05 21:57:15] [Rank 0] Group 4 Loss: 3.8523
+[2025-09-05 21:57:15] [Rank 0] Group 5 Loss: 4.3410
+[2025-09-05 21:57:15] [Rank 0] Group 6 Loss: 4.6412
+[2025-09-05 21:57:15] [Rank 0] Group 7 Loss: 4.7773
+[2025-09-05 21:57:15] [Rank 0] Group 8 Loss: 5.0937
+[2025-09-05 21:57:15] [Rank 0] Group 9 Loss: 5.2142
+[2025-09-05 21:57:15] [Rank 0] Group 10 Loss: 5.2720
+[2025-09-05 21:57:15] [Rank 0] Group 11 Loss: 5.3250
+[2025-09-05 21:57:15] [Rank 0] Group 12 Loss: 5.2170
+[2025-09-05 21:57:15] [Rank 0] Group 13 Loss: 5.2938
+[2025-09-05 21:57:15] [Rank 0] Group 14 Loss: 5.3141
+[2025-09-05 21:57:15] [Rank 0] Group 15 Loss: 5.2803
+[2025-09-05 21:57:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:57:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:57:15] [Rank 0] Group 2 FTA: 0.4000
+[2025-09-05 21:57:15] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:57:15] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:57:15] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 21:57:15] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 21:57:15] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 21:57:15] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:57:15] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 21:57:15] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 21:57:15] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 21:57:15] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 21:57:15] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 21:57:15] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 21:57:15] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 21:57:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:57:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:57:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:57:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:57:16] [Rank 0] step:5001/10000 train_time:222040ms step_avg:44.40ms
+[2025-09-05 21:57:17] [Rank 0] step:5021/10000 train_time:222719ms step_avg:44.36ms
+[2025-09-05 21:57:18] [Rank 0] step:5041/10000 train_time:223458ms step_avg:44.33ms
+[2025-09-05 21:57:18] [Rank 0] step:5061/10000 train_time:224197ms step_avg:44.30ms
+[2025-09-05 21:57:19] [Rank 0] step:5081/10000 train_time:224935ms step_avg:44.27ms
+[2025-09-05 21:57:20] [Rank 0] step:5101/10000 train_time:225674ms step_avg:44.24ms
+[2025-09-05 21:57:21] [Rank 0] step:5121/10000 train_time:226412ms step_avg:44.21ms
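Over this stretch the 500-step val_loss improvements shrink steadily; a quick diff of the summaries logged so far makes the trend explicit:

```python
# Successive 500-step val_loss gains, using the summary values logged above.
val_loss = {2500: 2.5547, 3000: 2.4372, 3500: 2.3461,
            4000: 2.2798, 4500: 2.2277, 5000: 2.1847}
steps = sorted(val_loss)
for prev, cur in zip(steps, steps[1:]):
    print(cur, round(val_loss[prev] - val_loss[cur], 4))
# 3000 0.1175, 3500 0.0911, 4000 0.0663, 4500 0.0521, 5000 0.043
```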
+[2025-09-05 21:57:21] [Rank 0] step:5141/10000 train_time:227152ms step_avg:44.18ms
+[2025-09-05 21:57:22] [Rank 0] step:5161/10000 train_time:227890ms step_avg:44.16ms
+[2025-09-05 21:57:23] [Rank 0] step:5181/10000 train_time:228629ms step_avg:44.13ms
+[2025-09-05 21:57:23] [Rank 0] step:5201/10000 train_time:229368ms step_avg:44.10ms
+[2025-09-05 21:57:24] [Rank 0] step:5221/10000 train_time:230106ms step_avg:44.07ms
+[2025-09-05 21:57:25] [Rank 0] step:5241/10000 train_time:230845ms step_avg:44.05ms
+[2025-09-05 21:57:26] [Rank 0] step:5261/10000 train_time:231584ms step_avg:44.02ms
+[2025-09-05 21:57:26] [Rank 0] step:5281/10000 train_time:232322ms step_avg:43.99ms
+[2025-09-05 21:57:27] [Rank 0] step:5301/10000 train_time:233062ms step_avg:43.97ms
+[2025-09-05 21:57:28] [Rank 0] step:5321/10000 train_time:233800ms step_avg:43.94ms
+[2025-09-05 21:57:29] [Rank 0] step:5341/10000 train_time:234540ms step_avg:43.91ms
+[2025-09-05 21:57:29] [Rank 0] step:5361/10000 train_time:235279ms step_avg:43.89ms
+[2025-09-05 21:57:30] [Rank 0] step:5381/10000 train_time:236018ms step_avg:43.86ms
+[2025-09-05 21:57:31] [Rank 0] step:5401/10000 train_time:236756ms step_avg:43.84ms
+[2025-09-05 21:57:32] [Rank 0] step:5421/10000 train_time:237494ms step_avg:43.81ms
+[2025-09-05 21:57:32] [Rank 0] step:5441/10000 train_time:238232ms step_avg:43.78ms
+[2025-09-05 21:57:33] [Rank 0] step:5461/10000 train_time:238970ms step_avg:43.76ms
+[2025-09-05 21:57:34] [Rank 0] step:5481/10000 train_time:239709ms step_avg:43.73ms
+[2025-09-05 21:57:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 21:57:35] [Rank 0] PRINT: step:5500/10000 train_loss:2.1740 val_loss:2.1499 train_time:240529ms step_avg:43.73ms
+[2025-09-05 21:57:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:57:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 21:58:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 21:58:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 21:58:57] [Rank 0] Total Loss: 4.5639
+[2025-09-05 21:58:57] [Rank 0] Total FTA (Unweighted): 0.2994
+[2025-09-05 21:58:57] [Rank 0] Total FTA (Weighted): 0.2994
+[2025-09-05 21:58:57] [Rank 0] Group 0 Loss: 3.3333
+[2025-09-05 21:58:57] [Rank 0] Group 1 Loss: 3.1758
+[2025-09-05 21:58:57] [Rank 0] Group 2 Loss: 3.2311
+[2025-09-05 21:58:57] [Rank 0] Group 3 Loss: 3.5526
+[2025-09-05 21:58:57] [Rank 0] Group 4 Loss: 3.8599
+[2025-09-05 21:58:57] [Rank 0] Group 5 Loss: 4.3325
+[2025-09-05 21:58:57] [Rank 0] Group 6 Loss: 4.6337
+[2025-09-05 21:58:57] [Rank 0] Group 7 Loss: 4.7836
+[2025-09-05 21:58:57] [Rank 0] Group 8 Loss: 5.0917
+[2025-09-05 21:58:57] [Rank 0] Group 9 Loss: 5.2353
+[2025-09-05 21:58:57] [Rank 0] Group 10 Loss: 5.3106
+[2025-09-05 21:58:57] [Rank 0] Group 11 Loss: 5.3489
+[2025-09-05 21:58:57] [Rank 0] Group 12 Loss: 5.2358
+[2025-09-05 21:58:57] [Rank 0] Group 13 Loss: 5.2946
+[2025-09-05 21:58:57] [Rank 0] Group 14 Loss: 5.3148
+[2025-09-05 21:58:57] [Rank 0] Group 15 Loss: 5.2878
+[2025-09-05 21:58:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 21:58:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 21:58:57] [Rank 0] Group 2 FTA: 0.4000
+[2025-09-05 21:58:57] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 21:58:57] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 21:58:57] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 21:58:57] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 21:58:57] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 21:58:57] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 21:58:57] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 21:58:57] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 21:58:57] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-05 21:58:57] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 21:58:57] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 21:58:57] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 21:58:57] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 21:58:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 21:58:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 21:58:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 21:58:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 21:58:58] [Rank 0] step:5501/10000 train_time:240538ms step_avg:43.73ms
+[2025-09-05 21:58:59] [Rank 0] step:5521/10000 train_time:241202ms step_avg:43.69ms
+[2025-09-05 21:59:00] [Rank 0] step:5541/10000 train_time:241941ms step_avg:43.66ms
+[2025-09-05 21:59:01] [Rank 0] step:5561/10000 train_time:242680ms step_avg:43.64ms
+[2025-09-05 21:59:01] [Rank 0] step:5581/10000 train_time:243419ms step_avg:43.62ms
+[2025-09-05 21:59:02] [Rank 0] step:5601/10000 train_time:244158ms step_avg:43.59ms
+[2025-09-05 21:59:03] [Rank 0] step:5621/10000 train_time:244897ms step_avg:43.57ms
+[2025-09-05 21:59:04] [Rank 0] step:5641/10000 train_time:246255ms step_avg:43.65ms
+[2025-09-05 21:59:05] [Rank 0] step:5661/10000 train_time:246993ms step_avg:43.63ms
+[2025-09-05 21:59:06] [Rank 0] step:5681/10000 train_time:247734ms step_avg:43.61ms
+[2025-09-05 21:59:07] [Rank 0] step:5701/10000 train_time:248474ms step_avg:43.58ms
+[2025-09-05 21:59:07] [Rank 0] step:5721/10000 train_time:249213ms step_avg:43.56ms
+[2025-09-05 21:59:08] [Rank 0] step:5741/10000 train_time:249952ms step_avg:43.54ms
+[2025-09-05 21:59:09] [Rank 0] step:5761/10000 train_time:250691ms step_avg:43.52ms
+[2025-09-05 21:59:09] [Rank 0] step:5781/10000 train_time:251430ms step_avg:43.49ms
+[2025-09-05 21:59:10] [Rank 0] step:5801/10000 train_time:252168ms step_avg:43.47ms
+[2025-09-05 21:59:11] [Rank 0] step:5821/10000 train_time:252932ms step_avg:43.45ms
+[2025-09-05 21:59:12] [Rank 0] step:5841/10000 train_time:253670ms step_avg:43.43ms
+[2025-09-05 21:59:12] [Rank 0] step:5861/10000 train_time:254408ms step_avg:43.41ms
+[2025-09-05 21:59:13] [Rank 0] step:5881/10000 train_time:255147ms step_avg:43.38ms
+[2025-09-05 21:59:14] [Rank 0] step:5901/10000 train_time:255885ms step_avg:43.36ms
+[2025-09-05 21:59:15] [Rank 0] step:5921/10000 train_time:256624ms step_avg:43.34ms
+[2025-09-05 21:59:15] [Rank 0] step:5941/10000 train_time:257363ms step_avg:43.32ms
+[2025-09-05 21:59:16] [Rank 0] step:5961/10000 train_time:258102ms step_avg:43.30ms
+[2025-09-05 21:59:17] [Rank 0] step:5981/10000 train_time:258841ms step_avg:43.28ms
+[2025-09-05 21:59:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
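Note: in every evaluation block the "Unweighted" and "Weighted" Total FTA match. That is what you would expect if the 1600 fixed-eval samples split evenly across the 16 groups (an assumption, though it is consistent with every per-group FTA being a multiple of 0.01): a group-size-weighted mean collapses to the plain mean when all weights are equal. A sketch with the step-5500 values above:

    # Sketch: unweighted vs. size-weighted Total FTA at step 5500.
    # Group sizes are assumed (100 each = 1600 / 16), not read from the log.
    fta = [1.00, 1.00, 0.40, 0.17, 0.25, 0.27, 0.27, 0.15,
           0.22, 0.14, 0.20, 0.17, 0.14, 0.19, 0.14, 0.08]
    sizes = [100] * 16
    unweighted = sum(fta) / len(fta)
    weighted = sum(f * n for f, n in zip(fta, sizes)) / sum(sizes)
    print(f"{unweighted:.4f} {weighted:.4f}")  # 0.2994 0.2994, matching the log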
+[2025-09-05 21:59:18] [Rank 0] PRINT: step:6000/10000 train_loss:2.1442 val_loss:2.1232 train_time:259660ms step_avg:43.28ms
+[2025-09-05 21:59:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 21:59:18] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:00:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:00:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:00:40] [Rank 0] Total Loss: 4.6163
+[2025-09-05 22:00:40] [Rank 0] Total FTA (Unweighted): 0.3069
+[2025-09-05 22:00:40] [Rank 0] Total FTA (Weighted): 0.3069
+[2025-09-05 22:00:40] [Rank 0] Group 0 Loss: 3.2826
+[2025-09-05 22:00:40] [Rank 0] Group 1 Loss: 3.2511
+[2025-09-05 22:00:40] [Rank 0] Group 2 Loss: 3.2704
+[2025-09-05 22:00:40] [Rank 0] Group 3 Loss: 3.6290
+[2025-09-05 22:00:40] [Rank 0] Group 4 Loss: 3.9225
+[2025-09-05 22:00:40] [Rank 0] Group 5 Loss: 4.4013
+[2025-09-05 22:00:40] [Rank 0] Group 6 Loss: 4.6679
+[2025-09-05 22:00:40] [Rank 0] Group 7 Loss: 4.8377
+[2025-09-05 22:00:40] [Rank 0] Group 8 Loss: 5.1578
+[2025-09-05 22:00:40] [Rank 0] Group 9 Loss: 5.3097
+[2025-09-05 22:00:40] [Rank 0] Group 10 Loss: 5.3727
+[2025-09-05 22:00:40] [Rank 0] Group 11 Loss: 5.4076
+[2025-09-05 22:00:40] [Rank 0] Group 12 Loss: 5.2869
+[2025-09-05 22:00:40] [Rank 0] Group 13 Loss: 5.3286
+[2025-09-05 22:00:40] [Rank 0] Group 14 Loss: 5.3858
+[2025-09-05 22:00:40] [Rank 0] Group 15 Loss: 5.3500
+[2025-09-05 22:00:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:00:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:00:40] [Rank 0] Group 2 FTA: 0.4200
+[2025-09-05 22:00:40] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:00:40] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:00:40] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 22:00:40] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:00:40] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 22:00:40] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 22:00:40] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 22:00:40] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 22:00:40] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 22:00:40] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 22:00:40] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 22:00:40] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 22:00:40] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 22:00:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 22:00:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 22:00:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 22:00:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 22:00:41] [Rank 0] step:6001/10000 train_time:259670ms step_avg:43.27ms
+[2025-09-05 22:00:43] [Rank 0] step:6021/10000 train_time:260969ms step_avg:43.34ms
+[2025-09-05 22:00:43] [Rank 0] step:6041/10000 train_time:261708ms step_avg:43.32ms
+[2025-09-05 22:00:44] [Rank 0] step:6061/10000 train_time:262447ms step_avg:43.30ms
+[2025-09-05 22:00:45] [Rank 0] step:6081/10000 train_time:263186ms step_avg:43.28ms
+[2025-09-05 22:00:46] [Rank 0] step:6101/10000 train_time:263925ms step_avg:43.26ms
+[2025-09-05 22:00:46] [Rank 0] step:6121/10000 train_time:264664ms step_avg:43.24ms
+[2025-09-05 22:00:47] [Rank 0] step:6141/10000 train_time:265403ms step_avg:43.22ms
+[2025-09-05 22:00:48] [Rank 0] step:6161/10000 train_time:266142ms step_avg:43.20ms
+[2025-09-05 22:00:49] [Rank 0] step:6181/10000 train_time:266880ms step_avg:43.18ms
+[2025-09-05 22:00:49] [Rank 0] step:6201/10000 train_time:267737ms step_avg:43.18ms
+[2025-09-05 22:00:50] [Rank 0] step:6221/10000 train_time:268475ms step_avg:43.16ms
+[2025-09-05 22:00:51] [Rank 0] step:6241/10000 train_time:269214ms step_avg:43.14ms
+[2025-09-05 22:00:52] [Rank 0] step:6261/10000 train_time:270083ms step_avg:43.14ms
+[2025-09-05 22:00:52] [Rank 0] step:6281/10000 train_time:270821ms step_avg:43.12ms
+[2025-09-05 22:00:53] [Rank 0] step:6301/10000 train_time:271560ms step_avg:43.10ms
+[2025-09-05 22:00:54] [Rank 0] step:6321/10000 train_time:272299ms step_avg:43.08ms
+[2025-09-05 22:00:55] [Rank 0] step:6341/10000 train_time:273037ms step_avg:43.06ms
+[2025-09-05 22:00:55] [Rank 0] step:6361/10000 train_time:273777ms step_avg:43.04ms
+[2025-09-05 22:00:56] [Rank 0] step:6381/10000 train_time:274516ms step_avg:43.02ms
+[2025-09-05 22:00:57] [Rank 0] step:6401/10000 train_time:275255ms step_avg:43.00ms
+[2025-09-05 22:00:58] [Rank 0] step:6421/10000 train_time:275994ms step_avg:42.98ms
+[2025-09-05 22:00:58] [Rank 0] step:6441/10000 train_time:276733ms step_avg:42.96ms
+[2025-09-05 22:00:59] [Rank 0] step:6461/10000 train_time:277472ms step_avg:42.95ms
+[2025-09-05 22:01:00] [Rank 0] step:6481/10000 train_time:278211ms step_avg:42.93ms
+[2025-09-05 22:01:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:01:01] [Rank 0] PRINT: step:6500/10000 train_loss:2.1205 val_loss:2.0990 train_time:279030ms step_avg:42.93ms
+[2025-09-05 22:01:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:01:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:02:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:02:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:02:24] [Rank 0] Total Loss: 4.5240
+[2025-09-05 22:02:24] [Rank 0] Total FTA (Unweighted): 0.3219
+[2025-09-05 22:02:24] [Rank 0] Total FTA (Weighted): 0.3219
+[2025-09-05 22:02:24] [Rank 0] Group 0 Loss: 3.2538
+[2025-09-05 22:02:24] [Rank 0] Group 1 Loss: 3.1748
+[2025-09-05 22:02:24] [Rank 0] Group 2 Loss: 3.2147
+[2025-09-05 22:02:24] [Rank 0] Group 3 Loss: 3.5924
+[2025-09-05 22:02:24] [Rank 0] Group 4 Loss: 3.8330
+[2025-09-05 22:02:24] [Rank 0] Group 5 Loss: 4.2851
+[2025-09-05 22:02:24] [Rank 0] Group 6 Loss: 4.6010
+[2025-09-05 22:02:24] [Rank 0] Group 7 Loss: 4.7359
+[2025-09-05 22:02:24] [Rank 0] Group 8 Loss: 5.0433
+[2025-09-05 22:02:24] [Rank 0] Group 9 Loss: 5.1875
+[2025-09-05 22:02:24] [Rank 0] Group 10 Loss: 5.2157
+[2025-09-05 22:02:24] [Rank 0] Group 11 Loss: 5.2966
+[2025-09-05 22:02:24] [Rank 0] Group 12 Loss: 5.1819
+[2025-09-05 22:02:24] [Rank 0] Group 13 Loss: 5.2472
+[2025-09-05 22:02:24] [Rank 0] Group 14 Loss: 5.2682
+[2025-09-05 22:02:24] [Rank 0] Group 15 Loss: 5.2534
+[2025-09-05 22:02:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:02:24] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:02:24] [Rank 0] Group 2 FTA: 0.5200
+[2025-09-05 22:02:24] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:02:24] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:02:24] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 22:02:24] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:02:24] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 22:02:24] [Rank 0] Group 8 FTA: 0.2000
+[2025-09-05 22:02:24] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 22:02:24] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 22:02:24] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 22:02:24] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 22:02:24] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 22:02:24] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 22:02:24] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 22:02:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 22:02:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 22:02:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 22:02:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 22:02:25] [Rank 0] step:6501/10000 train_time:279039ms step_avg:42.92ms
+[2025-09-05 22:02:26] [Rank 0] step:6521/10000 train_time:279713ms step_avg:42.89ms
+[2025-09-05 22:02:27] [Rank 0] step:6541/10000 train_time:280453ms step_avg:42.88ms
+[2025-09-05 22:02:28] [Rank 0] step:6561/10000 train_time:281191ms step_avg:42.86ms
+[2025-09-05 22:02:28] [Rank 0] step:6581/10000 train_time:281929ms step_avg:42.84ms
+[2025-09-05 22:02:29] [Rank 0] step:6601/10000 train_time:282666ms step_avg:42.82ms
+[2025-09-05 22:02:30] [Rank 0] step:6621/10000 train_time:283403ms step_avg:42.80ms
+[2025-09-05 22:02:30] [Rank 0] step:6641/10000 train_time:284141ms step_avg:42.79ms
+[2025-09-05 22:02:31] [Rank 0] step:6661/10000 train_time:284880ms step_avg:42.77ms
+[2025-09-05 22:02:32] [Rank 0] step:6681/10000 train_time:285618ms step_avg:42.75ms
+[2025-09-05 22:02:33] [Rank 0] step:6701/10000 train_time:286357ms step_avg:42.73ms
+[2025-09-05 22:02:33] [Rank 0] step:6721/10000 train_time:287095ms step_avg:42.72ms
+[2025-09-05 22:02:34] [Rank 0] step:6741/10000 train_time:287834ms step_avg:42.70ms
+[2025-09-05 22:02:35] [Rank 0] step:6761/10000 train_time:288573ms step_avg:42.68ms
+[2025-09-05 22:02:36] [Rank 0] step:6781/10000 train_time:289311ms step_avg:42.66ms
+[2025-09-05 22:02:36] [Rank 0] step:6801/10000 train_time:290050ms step_avg:42.65ms
+[2025-09-05 22:02:37] [Rank 0] step:6821/10000 train_time:290789ms step_avg:42.63ms
+[2025-09-05 22:02:38] [Rank 0] step:6841/10000 train_time:292143ms step_avg:42.70ms
+[2025-09-05 22:02:39] [Rank 0] step:6861/10000 train_time:292882ms step_avg:42.69ms
+[2025-09-05 22:02:40] [Rank 0] step:6881/10000 train_time:293621ms step_avg:42.67ms
+[2025-09-05 22:02:41] [Rank 0] step:6901/10000 train_time:294360ms step_avg:42.65ms
+[2025-09-05 22:02:41] [Rank 0] step:6921/10000 train_time:295099ms step_avg:42.64ms
+[2025-09-05 22:02:42] [Rank 0] step:6941/10000 train_time:295838ms step_avg:42.62ms
+[2025-09-05 22:02:43] [Rank 0] step:6961/10000 train_time:296577ms step_avg:42.61ms
+[2025-09-05 22:02:44] [Rank 0] step:6981/10000 train_time:297315ms step_avg:42.59ms
+[2025-09-05 22:02:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:02:45] [Rank 0] PRINT: step:7000/10000 train_loss:2.0968 val_loss:2.0784 train_time:298135ms step_avg:42.59ms
+[2025-09-05 22:02:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:02:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:04:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:04:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:04:07] [Rank 0] Total Loss: 4.5132
+[2025-09-05 22:04:07] [Rank 0] Total FTA (Unweighted): 0.3187
+[2025-09-05 22:04:07] [Rank 0] Total FTA (Weighted): 0.3187
+[2025-09-05 22:04:07] [Rank 0] Group 0 Loss: 3.2738
+[2025-09-05 22:04:07] [Rank 0] Group 1 Loss: 3.1817
+[2025-09-05 22:04:07] [Rank 0] Group 2 Loss: 3.1632
+[2025-09-05 22:04:07] [Rank 0] Group 3 Loss: 3.5649
+[2025-09-05 22:04:07] [Rank 0] Group 4 Loss: 3.8499
+[2025-09-05 22:04:07] [Rank 0] Group 5 Loss: 4.2796
+[2025-09-05 22:04:07] [Rank 0] Group 6 Loss: 4.5780
+[2025-09-05 22:04:07] [Rank 0] Group 7 Loss: 4.7256
+[2025-09-05 22:04:07] [Rank 0] Group 8 Loss: 5.0338
+[2025-09-05 22:04:07] [Rank 0] Group 9 Loss: 5.1687
+[2025-09-05 22:04:07] [Rank 0] Group 10 Loss: 5.2259
+[2025-09-05 22:04:07] [Rank 0] Group 11 Loss: 5.2620
+[2025-09-05 22:04:07] [Rank 0] Group 12 Loss: 5.1776
+[2025-09-05 22:04:07] [Rank 0] Group 13 Loss: 5.2335
+[2025-09-05 22:04:07] [Rank 0] Group 14 Loss: 5.2495
+[2025-09-05 22:04:07] [Rank 0] Group 15 Loss: 5.2427
+[2025-09-05 22:04:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:04:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:04:07] [Rank 0] Group 2 FTA: 0.4800
+[2025-09-05 22:04:07] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:04:07] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:04:07] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 22:04:07] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:04:07] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 22:04:07] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 22:04:07] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 22:04:07] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 22:04:07] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 22:04:07] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 22:04:07] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 22:04:07] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 22:04:07] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 22:04:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 22:04:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 22:04:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 22:04:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 22:04:08] [Rank 0] step:7001/10000 train_time:298144ms step_avg:42.59ms
+[2025-09-05 22:04:09] [Rank 0] step:7021/10000 train_time:298819ms step_avg:42.56ms
+[2025-09-05 22:04:10] [Rank 0] step:7041/10000 train_time:299558ms step_avg:42.54ms
+[2025-09-05 22:04:11] [Rank 0] step:7061/10000 train_time:300295ms step_avg:42.53ms
+[2025-09-05 22:04:11] [Rank 0] step:7081/10000 train_time:301033ms step_avg:42.51ms
+[2025-09-05 22:04:12] [Rank 0] step:7101/10000 train_time:301771ms step_avg:42.50ms
+[2025-09-05 22:04:13] [Rank 0] step:7121/10000 train_time:302510ms step_avg:42.48ms
+[2025-09-05 22:04:14] [Rank 0] step:7141/10000 train_time:303247ms step_avg:42.47ms
+[2025-09-05 22:04:14] [Rank 0] step:7161/10000 train_time:303986ms step_avg:42.45ms
+[2025-09-05 22:04:15] [Rank 0] step:7181/10000 train_time:304725ms step_avg:42.43ms
+[2025-09-05 22:04:16] [Rank 0] step:7201/10000 train_time:305463ms step_avg:42.42ms
+[2025-09-05 22:04:17] [Rank 0] step:7221/10000 train_time:306202ms step_avg:42.40ms
+[2025-09-05 22:04:17] [Rank 0] step:7241/10000 train_time:306941ms step_avg:42.39ms
+[2025-09-05 22:04:18] [Rank 0] step:7261/10000 train_time:307679ms step_avg:42.37ms
+[2025-09-05 22:04:19] [Rank 0] step:7281/10000 train_time:308418ms step_avg:42.36ms
+[2025-09-05 22:04:19] [Rank 0] step:7301/10000 train_time:309157ms step_avg:42.34ms
+[2025-09-05 22:04:20] [Rank 0] step:7321/10000 train_time:309895ms step_avg:42.33ms
+[2025-09-05 22:04:21] [Rank 0] step:7341/10000 train_time:310635ms step_avg:42.32ms
+[2025-09-05 22:04:22] [Rank 0] step:7361/10000 train_time:311373ms step_avg:42.30ms
+[2025-09-05 22:04:22] [Rank 0] step:7381/10000 train_time:312112ms step_avg:42.29ms
+[2025-09-05 22:04:23] [Rank 0] step:7401/10000 train_time:312852ms step_avg:42.27ms
+[2025-09-05 22:04:24] [Rank 0] step:7421/10000 train_time:313591ms step_avg:42.26ms
+[2025-09-05 22:04:25] [Rank 0] step:7441/10000 train_time:314328ms step_avg:42.24ms
+[2025-09-05 22:04:25] [Rank 0] step:7461/10000 train_time:315067ms step_avg:42.23ms
+[2025-09-05 22:04:26] [Rank 0] step:7481/10000 train_time:315806ms step_avg:42.21ms
+[2025-09-05 22:04:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:04:27] [Rank 0] PRINT: step:7500/10000 train_loss:2.0781 val_loss:2.0617 train_time:316626ms step_avg:42.22ms
+[2025-09-05 22:04:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:04:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:05:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:05:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:05:49] [Rank 0] Total Loss: 4.5182
+[2025-09-05 22:05:49] [Rank 0] Total FTA (Unweighted): 0.3294
+[2025-09-05 22:05:49] [Rank 0] Total FTA (Weighted): 0.3294
+[2025-09-05 22:05:49] [Rank 0] Group 0 Loss: 3.2829
+[2025-09-05 22:05:49] [Rank 0] Group 1 Loss: 3.2039
+[2025-09-05 22:05:49] [Rank 0] Group 2 Loss: 3.2643
+[2025-09-05 22:05:49] [Rank 0] Group 3 Loss: 3.5682
+[2025-09-05 22:05:49] [Rank 0] Group 4 Loss: 3.8506
+[2025-09-05 22:05:49] [Rank 0] Group 5 Loss: 4.2735
+[2025-09-05 22:05:49] [Rank 0] Group 6 Loss: 4.5521
+[2025-09-05 22:05:49] [Rank 0] Group 7 Loss: 4.7250
+[2025-09-05 22:05:49] [Rank 0] Group 8 Loss: 5.0183
+[2025-09-05 22:05:49] [Rank 0] Group 9 Loss: 5.1788
+[2025-09-05 22:05:49] [Rank 0] Group 10 Loss: 5.2165
+[2025-09-05 22:05:49] [Rank 0] Group 11 Loss: 5.2696
+[2025-09-05 22:05:49] [Rank 0] Group 12 Loss: 5.1828
+[2025-09-05 22:05:49] [Rank 0] Group 13 Loss: 5.2253
+[2025-09-05 22:05:49] [Rank 0] Group 14 Loss: 5.2503
+[2025-09-05 22:05:49] [Rank 0] Group 15 Loss: 5.2283
+[2025-09-05 22:05:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:05:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:05:49] [Rank 0] Group 2 FTA: 0.6600
+[2025-09-05 22:05:49] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:05:49] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:05:49] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 22:05:49] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:05:49] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 22:05:49] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 22:05:49] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 22:05:49] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 22:05:49] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 22:05:49] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 22:05:49] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 22:05:49] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 22:05:49] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 22:05:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png
+[2025-09-05 22:05:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png
+[2025-09-05 22:05:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png
+[2025-09-05 22:05:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png
+[2025-09-05 22:05:51] [Rank 0] step:7501/10000 train_time:316635ms step_avg:42.21ms
+[2025-09-05 22:05:51] [Rank 0] step:7521/10000 train_time:317309ms step_avg:42.19ms
+[2025-09-05 22:05:52] [Rank 0] step:7541/10000 train_time:318047ms step_avg:42.18ms
+[2025-09-05 22:05:53] [Rank 0] step:7561/10000 train_time:318784ms step_avg:42.16ms
+[2025-09-05 22:05:54] [Rank 0] step:7581/10000 train_time:319523ms step_avg:42.15ms
+[2025-09-05 22:05:54] [Rank 0] step:7601/10000 train_time:320262ms step_avg:42.13ms
+[2025-09-05 22:05:55] [Rank 0] step:7621/10000 train_time:321000ms step_avg:42.12ms
+[2025-09-05 22:05:56] [Rank 0] step:7641/10000 train_time:322367ms step_avg:42.19ms
+[2025-09-05 22:05:57] [Rank 0] step:7661/10000 train_time:323076ms step_avg:42.17ms
+[2025-09-05 22:05:58] [Rank 0] step:7681/10000 train_time:323815ms step_avg:42.16ms
+[2025-09-05 22:05:59] [Rank 0] step:7701/10000 train_time:324554ms step_avg:42.14ms
+[2025-09-05 22:05:59] [Rank 0] step:7721/10000 train_time:325293ms step_avg:42.13ms
+[2025-09-05 22:06:00] [Rank 0] step:7741/10000 train_time:326032ms step_avg:42.12ms
+[2025-09-05 22:06:01] [Rank 0] step:7761/10000 train_time:326771ms step_avg:42.10ms
+[2025-09-05 22:06:02] [Rank 0] step:7781/10000 train_time:327510ms step_avg:42.09ms
+[2025-09-05 22:06:02] [Rank 0] step:7801/10000 train_time:328248ms step_avg:42.08ms
+[2025-09-05 22:06:03] [Rank 0] step:7821/10000 train_time:328988ms step_avg:42.06ms
+[2025-09-05 22:06:04] [Rank 0] step:7841/10000 train_time:329727ms step_avg:42.05ms
+[2025-09-05 22:06:05] [Rank 0] step:7861/10000 train_time:330466ms step_avg:42.04ms
+[2025-09-05 22:06:05] [Rank 0] step:7881/10000 train_time:331360ms step_avg:42.05ms
+[2025-09-05 22:06:06] [Rank 0] step:7901/10000 train_time:332099ms step_avg:42.03ms
+[2025-09-05 22:06:07] [Rank 0] step:7921/10000 train_time:332838ms step_avg:42.02ms
+[2025-09-05 22:06:08] [Rank 0] step:7941/10000 train_time:333694ms step_avg:42.02ms
+[2025-09-05 22:06:09] [Rank 0] step:7961/10000 train_time:334433ms step_avg:42.01ms
+[2025-09-05 22:06:09] [Rank 0] step:7981/10000 train_time:335172ms step_avg:42.00ms
+[2025-09-05 22:06:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
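Note: the step_avg column is consistent with train_time divided by the step index, i.e. a running average over all steps so far rather than a window. A quick check against the step:7641 line above (an illustration, not the script's code):

    # Sketch: step_avg appears to be train_time / step.
    train_time_ms = 322367
    step = 7641
    print(round(train_time_ms / step, 2))  # 42.19, matching the logged step_avg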
+[2025-09-05 22:06:11] [Rank 0] PRINT: step:8000/10000 train_loss:2.0642 val_loss:2.0495 train_time:335991ms step_avg:42.00ms +[2025-09-05 22:06:11] [Rank 0] PRINT: step:8000/10000 train_loss:2.0642 val_loss:2.0495 train_time:335991ms step_avg:42.00ms +[2025-09-05 22:06:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:06:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:06:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:06:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:07:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:07:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:07:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:07:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:07:33] [Rank 0] Total Loss: 4.5493 +[2025-09-05 22:07:33] [Rank 0] Total Loss: 4.5493 +[2025-09-05 22:07:33] [Rank 0] Total FTA (Unweighted): 0.3375 +[2025-09-05 22:07:33] [Rank 0] Total FTA (Unweighted): 0.3375 +[2025-09-05 22:07:33] [Rank 0] Total FTA (Weighted): 0.3375 +[2025-09-05 22:07:33] [Rank 0] Total FTA (Weighted): 0.3375 +[2025-09-05 22:07:33] [Rank 0] Group 0 Loss: 3.3083 +[2025-09-05 22:07:33] [Rank 0] Group 0 Loss: 3.3083 +[2025-09-05 22:07:33] [Rank 0] Group 1 Loss: 3.2464 +[2025-09-05 22:07:33] [Rank 0] Group 1 Loss: 3.2464 +[2025-09-05 22:07:33] [Rank 0] Group 2 Loss: 3.3332 +[2025-09-05 22:07:33] [Rank 0] Group 2 Loss: 3.3332 +[2025-09-05 22:07:33] [Rank 0] Group 3 Loss: 3.5855 +[2025-09-05 22:07:33] [Rank 0] Group 3 Loss: 3.5855 +[2025-09-05 22:07:33] [Rank 0] Group 4 Loss: 3.8840 +[2025-09-05 22:07:33] [Rank 0] Group 4 Loss: 3.8840 +[2025-09-05 22:07:33] [Rank 0] Group 5 Loss: 4.2803 +[2025-09-05 22:07:33] [Rank 0] Group 5 Loss: 4.2803 +[2025-09-05 22:07:33] [Rank 0] Group 6 Loss: 4.5754 +[2025-09-05 22:07:33] [Rank 0] Group 6 Loss: 4.5754 +[2025-09-05 22:07:33] [Rank 0] Group 7 Loss: 4.7671 +[2025-09-05 22:07:33] [Rank 0] Group 7 Loss: 4.7671 +[2025-09-05 22:07:33] [Rank 0] Group 8 Loss: 5.0532 +[2025-09-05 22:07:33] [Rank 0] Group 8 Loss: 5.0532 +[2025-09-05 22:07:33] [Rank 0] Group 9 Loss: 5.1988 +[2025-09-05 22:07:33] [Rank 0] Group 9 Loss: 5.1988 +[2025-09-05 22:07:33] [Rank 0] Group 10 Loss: 5.2717 +[2025-09-05 22:07:33] [Rank 0] Group 10 Loss: 5.2717 +[2025-09-05 22:07:33] [Rank 0] Group 11 Loss: 5.2850 +[2025-09-05 22:07:33] [Rank 0] Group 11 Loss: 5.2850 +[2025-09-05 22:07:33] [Rank 0] Group 12 Loss: 5.2301 +[2025-09-05 22:07:33] [Rank 0] Group 12 Loss: 5.2301 +[2025-09-05 22:07:33] [Rank 0] Group 13 Loss: 5.2424 +[2025-09-05 22:07:33] [Rank 0] Group 13 Loss: 5.2424 +[2025-09-05 22:07:33] [Rank 0] Group 14 Loss: 5.2575 +[2025-09-05 22:07:33] [Rank 0] Group 14 Loss: 5.2575 +[2025-09-05 22:07:33] [Rank 0] Group 15 Loss: 5.2693 +[2025-09-05 22:07:33] [Rank 0] Group 15 Loss: 5.2693 +[2025-09-05 22:07:33] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:07:33] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:07:33] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:07:33] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:07:33] [Rank 0] Group 2 FTA: 0.6400 +[2025-09-05 22:07:33] [Rank 0] Group 2 FTA: 0.6400 +[2025-09-05 22:07:33] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:07:33] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:07:33] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:07:33] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:07:33] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 22:07:33] [Rank 0] Group 5 FTA: 
0.3000 +[2025-09-05 22:07:33] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:07:33] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:07:33] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 22:07:33] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 22:07:33] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 22:07:33] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 22:07:33] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:07:33] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:07:33] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:07:33] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:07:33] [Rank 0] Group 11 FTA: 0.2100 +[2025-09-05 22:07:33] [Rank 0] Group 11 FTA: 0.2100 +[2025-09-05 22:07:33] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-05 22:07:33] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-05 22:07:33] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 22:07:33] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 22:07:33] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 22:07:33] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 22:07:33] [Rank 0] Group 15 FTA: 0.1400 +[2025-09-05 22:07:33] [Rank 0] Group 15 FTA: 0.1400 +[2025-09-05 22:07:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:07:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:07:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:07:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:07:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:07:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:07:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:07:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:07:34] [Rank 0] step:8001/10000 train_time:336000ms step_avg:41.99ms +[2025-09-05 22:07:34] [Rank 0] step:8001/10000 train_time:336000ms step_avg:41.99ms +[2025-09-05 22:07:36] [Rank 0] step:8021/10000 train_time:337272ms step_avg:42.05ms +[2025-09-05 22:07:36] [Rank 0] step:8021/10000 train_time:337272ms step_avg:42.05ms +[2025-09-05 22:07:36] [Rank 0] step:8041/10000 train_time:338011ms step_avg:42.04ms +[2025-09-05 22:07:36] [Rank 0] step:8041/10000 train_time:338011ms step_avg:42.04ms +[2025-09-05 22:07:37] [Rank 0] step:8061/10000 train_time:338749ms step_avg:42.02ms +[2025-09-05 22:07:37] [Rank 0] step:8061/10000 train_time:338749ms step_avg:42.02ms +[2025-09-05 22:07:38] [Rank 0] step:8081/10000 train_time:339488ms step_avg:42.01ms +[2025-09-05 22:07:38] [Rank 0] step:8081/10000 train_time:339488ms step_avg:42.01ms +[2025-09-05 22:07:39] [Rank 0] step:8101/10000 train_time:340226ms step_avg:42.00ms +[2025-09-05 22:07:39] [Rank 0] step:8101/10000 train_time:340226ms step_avg:42.00ms +[2025-09-05 22:07:39] [Rank 0] step:8121/10000 train_time:340965ms step_avg:41.99ms +[2025-09-05 22:07:39] 
[Rank 0] step:8121/10000 train_time:340965ms step_avg:41.99ms +[2025-09-05 22:07:40] [Rank 0] step:8141/10000 train_time:341704ms step_avg:41.97ms +[2025-09-05 22:07:40] [Rank 0] step:8141/10000 train_time:341704ms step_avg:41.97ms +[2025-09-05 22:07:41] [Rank 0] step:8161/10000 train_time:342443ms step_avg:41.96ms +[2025-09-05 22:07:41] [Rank 0] step:8161/10000 train_time:342443ms step_avg:41.96ms +[2025-09-05 22:07:42] [Rank 0] step:8181/10000 train_time:343181ms step_avg:41.95ms +[2025-09-05 22:07:42] [Rank 0] step:8181/10000 train_time:343181ms step_avg:41.95ms +[2025-09-05 22:07:42] [Rank 0] step:8201/10000 train_time:343920ms step_avg:41.94ms +[2025-09-05 22:07:42] [Rank 0] step:8201/10000 train_time:343920ms step_avg:41.94ms +[2025-09-05 22:07:43] [Rank 0] step:8221/10000 train_time:344659ms step_avg:41.92ms +[2025-09-05 22:07:43] [Rank 0] step:8221/10000 train_time:344659ms step_avg:41.92ms +[2025-09-05 22:07:44] [Rank 0] step:8241/10000 train_time:345397ms step_avg:41.91ms +[2025-09-05 22:07:44] [Rank 0] step:8241/10000 train_time:345397ms step_avg:41.91ms +[2025-09-05 22:07:44] [Rank 0] step:8261/10000 train_time:346136ms step_avg:41.90ms +[2025-09-05 22:07:44] [Rank 0] step:8261/10000 train_time:346136ms step_avg:41.90ms +[2025-09-05 22:07:45] [Rank 0] step:8281/10000 train_time:346875ms step_avg:41.89ms +[2025-09-05 22:07:45] [Rank 0] step:8281/10000 train_time:346875ms step_avg:41.89ms +[2025-09-05 22:07:46] [Rank 0] step:8301/10000 train_time:347614ms step_avg:41.88ms +[2025-09-05 22:07:46] [Rank 0] step:8301/10000 train_time:347614ms step_avg:41.88ms +[2025-09-05 22:07:47] [Rank 0] step:8321/10000 train_time:348353ms step_avg:41.86ms +[2025-09-05 22:07:47] [Rank 0] step:8321/10000 train_time:348353ms step_avg:41.86ms +[2025-09-05 22:07:47] [Rank 0] step:8341/10000 train_time:349092ms step_avg:41.85ms +[2025-09-05 22:07:47] [Rank 0] step:8341/10000 train_time:349092ms step_avg:41.85ms +[2025-09-05 22:07:48] [Rank 0] step:8361/10000 train_time:349831ms step_avg:41.84ms +[2025-09-05 22:07:48] [Rank 0] step:8361/10000 train_time:349831ms step_avg:41.84ms +[2025-09-05 22:07:49] [Rank 0] step:8381/10000 train_time:350568ms step_avg:41.83ms +[2025-09-05 22:07:49] [Rank 0] step:8381/10000 train_time:350568ms step_avg:41.83ms +[2025-09-05 22:07:50] [Rank 0] step:8401/10000 train_time:351307ms step_avg:41.82ms +[2025-09-05 22:07:50] [Rank 0] step:8401/10000 train_time:351307ms step_avg:41.82ms +[2025-09-05 22:07:50] [Rank 0] step:8421/10000 train_time:352044ms step_avg:41.81ms +[2025-09-05 22:07:50] [Rank 0] step:8421/10000 train_time:352044ms step_avg:41.81ms +[2025-09-05 22:07:51] [Rank 0] step:8441/10000 train_time:352781ms step_avg:41.79ms +[2025-09-05 22:07:51] [Rank 0] step:8441/10000 train_time:352781ms step_avg:41.79ms +[2025-09-05 22:07:52] [Rank 0] step:8461/10000 train_time:353520ms step_avg:41.78ms +[2025-09-05 22:07:52] [Rank 0] step:8461/10000 train_time:353520ms step_avg:41.78ms +[2025-09-05 22:07:53] [Rank 0] step:8481/10000 train_time:354259ms step_avg:41.77ms +[2025-09-05 22:07:53] [Rank 0] step:8481/10000 train_time:354259ms step_avg:41.77ms +[2025-09-05 22:07:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 22:07:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
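The divisibility warning that recurs before every validation pass above is a simple floor-division check. Below is a minimal sketch of the arithmetic, assuming `val_batch_size` is the number of tokens consumed per validation step (65536 = val_seq_len 16384 × a presumed world size of 4 — that decomposition is an assumption; the two numbers themselves are taken from the log):

```python
# Sketch of the check behind the repeated warning; names are assumptions,
# the actual script may compute this differently.
val_tokens = 491520      # from the run config
val_batch_size = 65536   # tokens per validation step, per the log message

full_batches = val_tokens // val_batch_size          # 7 full batches
missed = val_tokens - full_batches * val_batch_size  # 32768 tokens never seen

if val_tokens % val_batch_size != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
print(f"{full_batches} full batches, {missed} tokens skipped")
```

With these values 491520 / 65536 = 7.5, so only 7 full batches fit and the trailing 32768 tokens are dropped from every reported val_loss.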
+[2025-09-05 22:07:54] [Rank 0] PRINT: step:8500/10000 train_loss:2.0528 val_loss:2.0378 train_time:355078ms step_avg:41.77ms +[2025-09-05 22:07:54] [Rank 0] PRINT: step:8500/10000 train_loss:2.0528 val_loss:2.0378 train_time:355078ms step_avg:41.77ms +[2025-09-05 22:07:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:07:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:07:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:07:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:09:16] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:09:16] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:09:16] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:09:16] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:09:16] [Rank 0] Total Loss: 4.5453 +[2025-09-05 22:09:16] [Rank 0] Total Loss: 4.5453 +[2025-09-05 22:09:16] [Rank 0] Total FTA (Unweighted): 0.3500 +[2025-09-05 22:09:16] [Rank 0] Total FTA (Unweighted): 0.3500 +[2025-09-05 22:09:16] [Rank 0] Total FTA (Weighted): 0.3500 +[2025-09-05 22:09:16] [Rank 0] Total FTA (Weighted): 0.3500 +[2025-09-05 22:09:16] [Rank 0] Group 0 Loss: 3.3108 +[2025-09-05 22:09:16] [Rank 0] Group 0 Loss: 3.3108 +[2025-09-05 22:09:16] [Rank 0] Group 1 Loss: 3.2554 +[2025-09-05 22:09:16] [Rank 0] Group 1 Loss: 3.2554 +[2025-09-05 22:09:16] [Rank 0] Group 2 Loss: 3.3009 +[2025-09-05 22:09:16] [Rank 0] Group 2 Loss: 3.3009 +[2025-09-05 22:09:16] [Rank 0] Group 3 Loss: 3.5857 +[2025-09-05 22:09:16] [Rank 0] Group 3 Loss: 3.5857 +[2025-09-05 22:09:16] [Rank 0] Group 4 Loss: 3.8874 +[2025-09-05 22:09:16] [Rank 0] Group 4 Loss: 3.8874 +[2025-09-05 22:09:16] [Rank 0] Group 5 Loss: 4.2956 +[2025-09-05 22:09:16] [Rank 0] Group 5 Loss: 4.2956 +[2025-09-05 22:09:16] [Rank 0] Group 6 Loss: 4.5605 +[2025-09-05 22:09:16] [Rank 0] Group 6 Loss: 4.5605 +[2025-09-05 22:09:16] [Rank 0] Group 7 Loss: 4.7622 +[2025-09-05 22:09:16] [Rank 0] Group 7 Loss: 4.7622 +[2025-09-05 22:09:16] [Rank 0] Group 8 Loss: 5.0519 +[2025-09-05 22:09:16] [Rank 0] Group 8 Loss: 5.0519 +[2025-09-05 22:09:16] [Rank 0] Group 9 Loss: 5.1872 +[2025-09-05 22:09:16] [Rank 0] Group 9 Loss: 5.1872 +[2025-09-05 22:09:16] [Rank 0] Group 10 Loss: 5.2618 +[2025-09-05 22:09:16] [Rank 0] Group 10 Loss: 5.2618 +[2025-09-05 22:09:16] [Rank 0] Group 11 Loss: 5.2874 +[2025-09-05 22:09:16] [Rank 0] Group 11 Loss: 5.2874 +[2025-09-05 22:09:16] [Rank 0] Group 12 Loss: 5.2115 +[2025-09-05 22:09:16] [Rank 0] Group 12 Loss: 5.2115 +[2025-09-05 22:09:16] [Rank 0] Group 13 Loss: 5.2444 +[2025-09-05 22:09:16] [Rank 0] Group 13 Loss: 5.2444 +[2025-09-05 22:09:16] [Rank 0] Group 14 Loss: 5.2705 +[2025-09-05 22:09:16] [Rank 0] Group 14 Loss: 5.2705 +[2025-09-05 22:09:16] [Rank 0] Group 15 Loss: 5.2520 +[2025-09-05 22:09:16] [Rank 0] Group 15 Loss: 5.2520 +[2025-09-05 22:09:16] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:09:16] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:09:16] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:09:16] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:09:16] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 22:09:16] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 22:09:16] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:09:16] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:09:16] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:09:16] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:09:16] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 22:09:16] [Rank 0] Group 5 FTA: 
0.3000 +[2025-09-05 22:09:16] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:09:16] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:09:16] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 22:09:16] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 22:09:16] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 22:09:16] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 22:09:16] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:09:16] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:09:16] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:09:16] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:09:16] [Rank 0] Group 11 FTA: 0.2200 +[2025-09-05 22:09:16] [Rank 0] Group 11 FTA: 0.2200 +[2025-09-05 22:09:16] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-05 22:09:16] [Rank 0] Group 12 FTA: 0.1900 +[2025-09-05 22:09:16] [Rank 0] Group 13 FTA: 0.2400 +[2025-09-05 22:09:16] [Rank 0] Group 13 FTA: 0.2400 +[2025-09-05 22:09:16] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 22:09:16] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 22:09:16] [Rank 0] Group 15 FTA: 0.1800 +[2025-09-05 22:09:16] [Rank 0] Group 15 FTA: 0.1800 +[2025-09-05 22:09:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:09:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:09:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:09:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:09:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:09:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:09:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:09:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:09:18] [Rank 0] step:8501/10000 train_time:355087ms step_avg:41.77ms +[2025-09-05 22:09:18] [Rank 0] step:8501/10000 train_time:355087ms step_avg:41.77ms +[2025-09-05 22:09:18] [Rank 0] step:8521/10000 train_time:355762ms step_avg:41.75ms +[2025-09-05 22:09:18] [Rank 0] step:8521/10000 train_time:355762ms step_avg:41.75ms +[2025-09-05 22:09:19] [Rank 0] step:8541/10000 train_time:356500ms step_avg:41.74ms +[2025-09-05 22:09:19] [Rank 0] step:8541/10000 train_time:356500ms step_avg:41.74ms +[2025-09-05 22:09:20] [Rank 0] step:8561/10000 train_time:357239ms step_avg:41.73ms +[2025-09-05 22:09:20] [Rank 0] step:8561/10000 train_time:357239ms step_avg:41.73ms +[2025-09-05 22:09:21] [Rank 0] step:8581/10000 train_time:357977ms step_avg:41.72ms +[2025-09-05 22:09:21] [Rank 0] step:8581/10000 train_time:357977ms step_avg:41.72ms +[2025-09-05 22:09:21] [Rank 0] step:8601/10000 train_time:358715ms step_avg:41.71ms +[2025-09-05 22:09:21] [Rank 0] step:8601/10000 train_time:358715ms step_avg:41.71ms +[2025-09-05 22:09:22] [Rank 0] step:8621/10000 train_time:359454ms step_avg:41.70ms +[2025-09-05 22:09:22] 
[Rank 0] step:8621/10000 train_time:359454ms step_avg:41.70ms +[2025-09-05 22:09:23] [Rank 0] step:8641/10000 train_time:360193ms step_avg:41.68ms +[2025-09-05 22:09:23] [Rank 0] step:8641/10000 train_time:360193ms step_avg:41.68ms +[2025-09-05 22:09:24] [Rank 0] step:8661/10000 train_time:360932ms step_avg:41.67ms +[2025-09-05 22:09:24] [Rank 0] step:8661/10000 train_time:360932ms step_avg:41.67ms +[2025-09-05 22:09:24] [Rank 0] step:8681/10000 train_time:361670ms step_avg:41.66ms +[2025-09-05 22:09:24] [Rank 0] step:8681/10000 train_time:361670ms step_avg:41.66ms +[2025-09-05 22:09:25] [Rank 0] step:8701/10000 train_time:362407ms step_avg:41.65ms +[2025-09-05 22:09:25] [Rank 0] step:8701/10000 train_time:362407ms step_avg:41.65ms +[2025-09-05 22:09:26] [Rank 0] step:8721/10000 train_time:363145ms step_avg:41.64ms +[2025-09-05 22:09:26] [Rank 0] step:8721/10000 train_time:363145ms step_avg:41.64ms +[2025-09-05 22:09:27] [Rank 0] step:8741/10000 train_time:363883ms step_avg:41.63ms +[2025-09-05 22:09:27] [Rank 0] step:8741/10000 train_time:363883ms step_avg:41.63ms +[2025-09-05 22:09:27] [Rank 0] step:8761/10000 train_time:364622ms step_avg:41.62ms +[2025-09-05 22:09:27] [Rank 0] step:8761/10000 train_time:364622ms step_avg:41.62ms +[2025-09-05 22:09:28] [Rank 0] step:8781/10000 train_time:365361ms step_avg:41.61ms +[2025-09-05 22:09:28] [Rank 0] step:8781/10000 train_time:365361ms step_avg:41.61ms +[2025-09-05 22:09:29] [Rank 0] step:8801/10000 train_time:366099ms step_avg:41.60ms +[2025-09-05 22:09:29] [Rank 0] step:8801/10000 train_time:366099ms step_avg:41.60ms +[2025-09-05 22:09:30] [Rank 0] step:8821/10000 train_time:366837ms step_avg:41.59ms +[2025-09-05 22:09:30] [Rank 0] step:8821/10000 train_time:366837ms step_avg:41.59ms +[2025-09-05 22:09:31] [Rank 0] step:8841/10000 train_time:368206ms step_avg:41.65ms +[2025-09-05 22:09:31] [Rank 0] step:8841/10000 train_time:368206ms step_avg:41.65ms +[2025-09-05 22:09:32] [Rank 0] step:8861/10000 train_time:368945ms step_avg:41.64ms +[2025-09-05 22:09:32] [Rank 0] step:8861/10000 train_time:368945ms step_avg:41.64ms +[2025-09-05 22:09:32] [Rank 0] step:8881/10000 train_time:369683ms step_avg:41.63ms +[2025-09-05 22:09:32] [Rank 0] step:8881/10000 train_time:369683ms step_avg:41.63ms +[2025-09-05 22:09:33] [Rank 0] step:8901/10000 train_time:370421ms step_avg:41.62ms +[2025-09-05 22:09:33] [Rank 0] step:8901/10000 train_time:370421ms step_avg:41.62ms +[2025-09-05 22:09:34] [Rank 0] step:8921/10000 train_time:371160ms step_avg:41.61ms +[2025-09-05 22:09:34] [Rank 0] step:8921/10000 train_time:371160ms step_avg:41.61ms +[2025-09-05 22:09:35] [Rank 0] step:8941/10000 train_time:371898ms step_avg:41.59ms +[2025-09-05 22:09:35] [Rank 0] step:8941/10000 train_time:371898ms step_avg:41.59ms +[2025-09-05 22:09:35] [Rank 0] step:8961/10000 train_time:372636ms step_avg:41.58ms +[2025-09-05 22:09:35] [Rank 0] step:8961/10000 train_time:372636ms step_avg:41.58ms +[2025-09-05 22:09:36] [Rank 0] step:8981/10000 train_time:373374ms step_avg:41.57ms +[2025-09-05 22:09:36] [Rank 0] step:8981/10000 train_time:373374ms step_avg:41.57ms +[2025-09-05 22:09:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 22:09:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
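Each detailed-evaluation block in this log reports both a "Total FTA (Unweighted)" and a "Total FTA (Weighted)". One plausible reading, not confirmed by the visible code: the unweighted total is the mean of the 16 per-group FTAs, while the weighted total averages over samples using group sizes as weights. With per_group_k=100 the groups are equal-sized, which would explain why the two totals coincide (both 0.3500 at step 8500 above). A sketch under that assumption, using the step-8500 group values from this log:

```python
import numpy as np

# Per-group first-token accuracies from the step-8500 evaluation above;
# group_n reflects per_group_k=100 from the config. The weighting scheme
# itself is an assumption.
group_fta = np.array([1.00, 1.00, 0.80, 0.17, 0.25, 0.30, 0.29, 0.15,
                      0.23, 0.17, 0.23, 0.21, 0.19, 0.25, 0.18, 0.14])
group_n = np.full(16, 100)

unweighted = group_fta.mean()                           # mean of group means
weighted = (group_fta * group_n).sum() / group_n.sum()  # per-sample mean
print(f"Total FTA (Unweighted): {unweighted:.4f}")
print(f"Total FTA (Weighted): {weighted:.4f}")
```

The two only diverge when group sizes (or per-sample weights) are unequal, which matches the near-identical pairs reported throughout this log.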
+[2025-09-05 22:09:37] [Rank 0] PRINT: step:9000/10000 train_loss:2.0403 val_loss:2.0274 train_time:374194ms step_avg:41.58ms +[2025-09-05 22:09:37] [Rank 0] PRINT: step:9000/10000 train_loss:2.0403 val_loss:2.0274 train_time:374194ms step_avg:41.58ms +[2025-09-05 22:09:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:09:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:09:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:09:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:11:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:11:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:11:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:11:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:11:01] [Rank 0] Total Loss: 4.5238 +[2025-09-05 22:11:01] [Rank 0] Total Loss: 4.5238 +[2025-09-05 22:11:01] [Rank 0] Total FTA (Unweighted): 0.3512 +[2025-09-05 22:11:01] [Rank 0] Total FTA (Unweighted): 0.3512 +[2025-09-05 22:11:01] [Rank 0] Total FTA (Weighted): 0.3513 +[2025-09-05 22:11:01] [Rank 0] Total FTA (Weighted): 0.3513 +[2025-09-05 22:11:01] [Rank 0] Group 0 Loss: 3.2715 +[2025-09-05 22:11:01] [Rank 0] Group 0 Loss: 3.2715 +[2025-09-05 22:11:01] [Rank 0] Group 1 Loss: 3.2701 +[2025-09-05 22:11:01] [Rank 0] Group 1 Loss: 3.2701 +[2025-09-05 22:11:01] [Rank 0] Group 2 Loss: 3.2354 +[2025-09-05 22:11:01] [Rank 0] Group 2 Loss: 3.2354 +[2025-09-05 22:11:01] [Rank 0] Group 3 Loss: 3.5814 +[2025-09-05 22:11:01] [Rank 0] Group 3 Loss: 3.5814 +[2025-09-05 22:11:01] [Rank 0] Group 4 Loss: 3.8853 +[2025-09-05 22:11:01] [Rank 0] Group 4 Loss: 3.8853 +[2025-09-05 22:11:01] [Rank 0] Group 5 Loss: 4.2665 +[2025-09-05 22:11:01] [Rank 0] Group 5 Loss: 4.2665 +[2025-09-05 22:11:01] [Rank 0] Group 6 Loss: 4.5529 +[2025-09-05 22:11:01] [Rank 0] Group 6 Loss: 4.5529 +[2025-09-05 22:11:01] [Rank 0] Group 7 Loss: 4.7289 +[2025-09-05 22:11:01] [Rank 0] Group 7 Loss: 4.7289 +[2025-09-05 22:11:01] [Rank 0] Group 8 Loss: 5.0286 +[2025-09-05 22:11:01] [Rank 0] Group 8 Loss: 5.0286 +[2025-09-05 22:11:01] [Rank 0] Group 9 Loss: 5.1641 +[2025-09-05 22:11:01] [Rank 0] Group 9 Loss: 5.1641 +[2025-09-05 22:11:01] [Rank 0] Group 10 Loss: 5.2416 +[2025-09-05 22:11:01] [Rank 0] Group 10 Loss: 5.2416 +[2025-09-05 22:11:01] [Rank 0] Group 11 Loss: 5.2537 +[2025-09-05 22:11:01] [Rank 0] Group 11 Loss: 5.2537 +[2025-09-05 22:11:01] [Rank 0] Group 12 Loss: 5.1913 +[2025-09-05 22:11:01] [Rank 0] Group 12 Loss: 5.1913 +[2025-09-05 22:11:01] [Rank 0] Group 13 Loss: 5.2307 +[2025-09-05 22:11:01] [Rank 0] Group 13 Loss: 5.2307 +[2025-09-05 22:11:01] [Rank 0] Group 14 Loss: 5.2492 +[2025-09-05 22:11:01] [Rank 0] Group 14 Loss: 5.2492 +[2025-09-05 22:11:01] [Rank 0] Group 15 Loss: 5.2300 +[2025-09-05 22:11:01] [Rank 0] Group 15 Loss: 5.2300 +[2025-09-05 22:11:01] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:11:01] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:11:01] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:11:01] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:11:01] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 22:11:01] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 22:11:01] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:11:01] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:11:01] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:11:01] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:11:01] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 22:11:01] [Rank 0] Group 5 FTA: 
0.3000 +[2025-09-05 22:11:01] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:11:01] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:11:01] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 22:11:01] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 22:11:01] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 22:11:01] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 22:11:01] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:11:01] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:11:01] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:11:01] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:11:01] [Rank 0] Group 11 FTA: 0.2200 +[2025-09-05 22:11:01] [Rank 0] Group 11 FTA: 0.2200 +[2025-09-05 22:11:01] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 22:11:01] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 22:11:01] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 22:11:01] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 22:11:01] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 22:11:01] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 22:11:01] [Rank 0] Group 15 FTA: 0.1800 +[2025-09-05 22:11:01] [Rank 0] Group 15 FTA: 0.1800 +[2025-09-05 22:11:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:11:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:11:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:11:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:11:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:11:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:11:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:11:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:11:02] [Rank 0] step:9001/10000 train_time:374203ms step_avg:41.57ms +[2025-09-05 22:11:02] [Rank 0] step:9001/10000 train_time:374203ms step_avg:41.57ms +[2025-09-05 22:11:03] [Rank 0] step:9021/10000 train_time:374875ms step_avg:41.56ms +[2025-09-05 22:11:03] [Rank 0] step:9021/10000 train_time:374875ms step_avg:41.56ms +[2025-09-05 22:11:04] [Rank 0] step:9041/10000 train_time:375613ms step_avg:41.55ms +[2025-09-05 22:11:04] [Rank 0] step:9041/10000 train_time:375613ms step_avg:41.55ms +[2025-09-05 22:11:05] [Rank 0] step:9061/10000 train_time:376352ms step_avg:41.54ms +[2025-09-05 22:11:05] [Rank 0] step:9061/10000 train_time:376352ms step_avg:41.54ms +[2025-09-05 22:11:05] [Rank 0] step:9081/10000 train_time:377091ms step_avg:41.53ms +[2025-09-05 22:11:05] [Rank 0] step:9081/10000 train_time:377091ms step_avg:41.53ms +[2025-09-05 22:11:06] [Rank 0] step:9101/10000 train_time:377829ms step_avg:41.52ms +[2025-09-05 22:11:06] [Rank 0] step:9101/10000 train_time:377829ms step_avg:41.52ms +[2025-09-05 22:11:07] [Rank 0] step:9121/10000 train_time:378567ms step_avg:41.51ms +[2025-09-05 22:11:07] 
[Rank 0] step:9121/10000 train_time:378567ms step_avg:41.51ms +[2025-09-05 22:11:07] [Rank 0] step:9141/10000 train_time:379305ms step_avg:41.49ms +[2025-09-05 22:11:07] [Rank 0] step:9141/10000 train_time:379305ms step_avg:41.49ms +[2025-09-05 22:11:08] [Rank 0] step:9161/10000 train_time:380047ms step_avg:41.49ms +[2025-09-05 22:11:08] [Rank 0] step:9161/10000 train_time:380047ms step_avg:41.49ms +[2025-09-05 22:11:09] [Rank 0] step:9181/10000 train_time:380785ms step_avg:41.48ms +[2025-09-05 22:11:09] [Rank 0] step:9181/10000 train_time:380785ms step_avg:41.48ms +[2025-09-05 22:11:10] [Rank 0] step:9201/10000 train_time:381524ms step_avg:41.47ms +[2025-09-05 22:11:10] [Rank 0] step:9201/10000 train_time:381524ms step_avg:41.47ms +[2025-09-05 22:11:10] [Rank 0] step:9221/10000 train_time:382262ms step_avg:41.46ms +[2025-09-05 22:11:10] [Rank 0] step:9221/10000 train_time:382262ms step_avg:41.46ms +[2025-09-05 22:11:11] [Rank 0] step:9241/10000 train_time:383043ms step_avg:41.45ms +[2025-09-05 22:11:11] [Rank 0] step:9241/10000 train_time:383043ms step_avg:41.45ms +[2025-09-05 22:11:12] [Rank 0] step:9261/10000 train_time:383780ms step_avg:41.44ms +[2025-09-05 22:11:12] [Rank 0] step:9261/10000 train_time:383780ms step_avg:41.44ms +[2025-09-05 22:11:13] [Rank 0] step:9281/10000 train_time:384518ms step_avg:41.43ms +[2025-09-05 22:11:13] [Rank 0] step:9281/10000 train_time:384518ms step_avg:41.43ms +[2025-09-05 22:11:13] [Rank 0] step:9301/10000 train_time:385257ms step_avg:41.42ms +[2025-09-05 22:11:13] [Rank 0] step:9301/10000 train_time:385257ms step_avg:41.42ms +[2025-09-05 22:11:14] [Rank 0] step:9321/10000 train_time:385995ms step_avg:41.41ms +[2025-09-05 22:11:14] [Rank 0] step:9321/10000 train_time:385995ms step_avg:41.41ms +[2025-09-05 22:11:15] [Rank 0] step:9341/10000 train_time:386734ms step_avg:41.40ms +[2025-09-05 22:11:15] [Rank 0] step:9341/10000 train_time:386734ms step_avg:41.40ms +[2025-09-05 22:11:16] [Rank 0] step:9361/10000 train_time:387473ms step_avg:41.39ms +[2025-09-05 22:11:16] [Rank 0] step:9361/10000 train_time:387473ms step_avg:41.39ms +[2025-09-05 22:11:16] [Rank 0] step:9381/10000 train_time:388212ms step_avg:41.38ms +[2025-09-05 22:11:16] [Rank 0] step:9381/10000 train_time:388212ms step_avg:41.38ms +[2025-09-05 22:11:17] [Rank 0] step:9401/10000 train_time:388951ms step_avg:41.37ms +[2025-09-05 22:11:17] [Rank 0] step:9401/10000 train_time:388951ms step_avg:41.37ms +[2025-09-05 22:11:18] [Rank 0] step:9421/10000 train_time:389689ms step_avg:41.36ms +[2025-09-05 22:11:18] [Rank 0] step:9421/10000 train_time:389689ms step_avg:41.36ms +[2025-09-05 22:11:19] [Rank 0] step:9441/10000 train_time:390431ms step_avg:41.35ms +[2025-09-05 22:11:19] [Rank 0] step:9441/10000 train_time:390431ms step_avg:41.35ms +[2025-09-05 22:11:19] [Rank 0] step:9461/10000 train_time:391169ms step_avg:41.35ms +[2025-09-05 22:11:19] [Rank 0] step:9461/10000 train_time:391169ms step_avg:41.35ms +[2025-09-05 22:11:20] [Rank 0] step:9481/10000 train_time:391907ms step_avg:41.34ms +[2025-09-05 22:11:20] [Rank 0] step:9481/10000 train_time:391907ms step_avg:41.34ms +[2025-09-05 22:11:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 22:11:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
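The config for these runs sets num_iterations=10000 and cooldown_frac=0.8, which suggests a hold-then-cooldown learning-rate schedule: full LR for the first 20% of steps, then a linear ramp toward zero over the final 80%. The sketch below shows that assumed shape; the actual get_lr in the script is not visible in this excerpt and may, for example, decay to a nonzero floor instead:

```python
# Assumed hold-then-linear-cooldown LR multiplier; function name and the
# zero endpoint are assumptions, the two constants come from config.json.
def lr_scale(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    frac_done = step / num_iterations
    if frac_done < 1 - cooldown_frac:   # first 20% of training: full LR
        return 1.0
    return (1 - frac_done) / cooldown_frac  # linear decay over the final 80%

for s in (0, 2000, 8500, 9500, 10000):
    print(s, round(lr_scale(s), 4))   # 1.0, 1.0, 0.1875, 0.0625, 0.0
```

Under this reading, the steps logged in this stretch (9100–9500) sit deep in the cooldown, at roughly 6–11% of the peak learning rate.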
+[2025-09-05 22:11:21] [Rank 0] PRINT: step:9500/10000 train_loss:2.0299 val_loss:2.0182 train_time:392727ms step_avg:41.34ms +[2025-09-05 22:11:21] [Rank 0] PRINT: step:9500/10000 train_loss:2.0299 val_loss:2.0182 train_time:392727ms step_avg:41.34ms +[2025-09-05 22:11:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:11:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:11:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:11:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:12:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:12:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:12:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:12:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:12:43] [Rank 0] Total Loss: 4.5149 +[2025-09-05 22:12:43] [Rank 0] Total Loss: 4.5149 +[2025-09-05 22:12:43] [Rank 0] Total FTA (Unweighted): 0.3488 +[2025-09-05 22:12:43] [Rank 0] Total FTA (Unweighted): 0.3488 +[2025-09-05 22:12:43] [Rank 0] Total FTA (Weighted): 0.3488 +[2025-09-05 22:12:43] [Rank 0] Total FTA (Weighted): 0.3488 +[2025-09-05 22:12:43] [Rank 0] Group 0 Loss: 3.2899 +[2025-09-05 22:12:43] [Rank 0] Group 0 Loss: 3.2899 +[2025-09-05 22:12:43] [Rank 0] Group 1 Loss: 3.2942 +[2025-09-05 22:12:43] [Rank 0] Group 1 Loss: 3.2942 +[2025-09-05 22:12:43] [Rank 0] Group 2 Loss: 3.2463 +[2025-09-05 22:12:43] [Rank 0] Group 2 Loss: 3.2463 +[2025-09-05 22:12:43] [Rank 0] Group 3 Loss: 3.5958 +[2025-09-05 22:12:43] [Rank 0] Group 3 Loss: 3.5958 +[2025-09-05 22:12:43] [Rank 0] Group 4 Loss: 3.8433 +[2025-09-05 22:12:43] [Rank 0] Group 4 Loss: 3.8433 +[2025-09-05 22:12:43] [Rank 0] Group 5 Loss: 4.2402 +[2025-09-05 22:12:43] [Rank 0] Group 5 Loss: 4.2402 +[2025-09-05 22:12:43] [Rank 0] Group 6 Loss: 4.5375 +[2025-09-05 22:12:43] [Rank 0] Group 6 Loss: 4.5375 +[2025-09-05 22:12:43] [Rank 0] Group 7 Loss: 4.7176 +[2025-09-05 22:12:43] [Rank 0] Group 7 Loss: 4.7176 +[2025-09-05 22:12:43] [Rank 0] Group 8 Loss: 5.0148 +[2025-09-05 22:12:43] [Rank 0] Group 8 Loss: 5.0148 +[2025-09-05 22:12:43] [Rank 0] Group 9 Loss: 5.1501 +[2025-09-05 22:12:43] [Rank 0] Group 9 Loss: 5.1501 +[2025-09-05 22:12:43] [Rank 0] Group 10 Loss: 5.2315 +[2025-09-05 22:12:43] [Rank 0] Group 10 Loss: 5.2315 +[2025-09-05 22:12:43] [Rank 0] Group 11 Loss: 5.2430 +[2025-09-05 22:12:43] [Rank 0] Group 11 Loss: 5.2430 +[2025-09-05 22:12:43] [Rank 0] Group 12 Loss: 5.1850 +[2025-09-05 22:12:43] [Rank 0] Group 12 Loss: 5.1850 +[2025-09-05 22:12:43] [Rank 0] Group 13 Loss: 5.2129 +[2025-09-05 22:12:43] [Rank 0] Group 13 Loss: 5.2129 +[2025-09-05 22:12:43] [Rank 0] Group 14 Loss: 5.2265 +[2025-09-05 22:12:43] [Rank 0] Group 14 Loss: 5.2265 +[2025-09-05 22:12:43] [Rank 0] Group 15 Loss: 5.2107 +[2025-09-05 22:12:43] [Rank 0] Group 15 Loss: 5.2107 +[2025-09-05 22:12:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:12:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:12:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:12:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:12:43] [Rank 0] Group 2 FTA: 0.7000 +[2025-09-05 22:12:43] [Rank 0] Group 2 FTA: 0.7000 +[2025-09-05 22:12:43] [Rank 0] Group 3 FTA: 0.2000 +[2025-09-05 22:12:43] [Rank 0] Group 3 FTA: 0.2000 +[2025-09-05 22:12:43] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:12:43] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:12:43] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 22:12:43] [Rank 0] Group 5 FTA: 
0.3000 +[2025-09-05 22:12:43] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:12:43] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:12:43] [Rank 0] Group 7 FTA: 0.1700 +[2025-09-05 22:12:43] [Rank 0] Group 7 FTA: 0.1700 +[2025-09-05 22:12:43] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 22:12:43] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 22:12:43] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:12:43] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:12:43] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:12:43] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:12:43] [Rank 0] Group 11 FTA: 0.2200 +[2025-09-05 22:12:43] [Rank 0] Group 11 FTA: 0.2200 +[2025-09-05 22:12:43] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 22:12:43] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 22:12:43] [Rank 0] Group 13 FTA: 0.2700 +[2025-09-05 22:12:43] [Rank 0] Group 13 FTA: 0.2700 +[2025-09-05 22:12:43] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 22:12:43] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 22:12:43] [Rank 0] Group 15 FTA: 0.1800 +[2025-09-05 22:12:43] [Rank 0] Group 15 FTA: 0.1800 +[2025-09-05 22:12:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:12:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:12:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:12:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:12:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:12:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:12:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:12:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:12:45] [Rank 0] step:9501/10000 train_time:392736ms step_avg:41.34ms +[2025-09-05 22:12:45] [Rank 0] step:9501/10000 train_time:392736ms step_avg:41.34ms +[2025-09-05 22:12:46] [Rank 0] step:9521/10000 train_time:393408ms step_avg:41.32ms +[2025-09-05 22:12:46] [Rank 0] step:9521/10000 train_time:393408ms step_avg:41.32ms +[2025-09-05 22:12:46] [Rank 0] step:9541/10000 train_time:394147ms step_avg:41.31ms +[2025-09-05 22:12:46] [Rank 0] step:9541/10000 train_time:394147ms step_avg:41.31ms +[2025-09-05 22:12:47] [Rank 0] step:9561/10000 train_time:394887ms step_avg:41.30ms +[2025-09-05 22:12:47] [Rank 0] step:9561/10000 train_time:394887ms step_avg:41.30ms +[2025-09-05 22:12:48] [Rank 0] step:9581/10000 train_time:395626ms step_avg:41.29ms +[2025-09-05 22:12:48] [Rank 0] step:9581/10000 train_time:395626ms step_avg:41.29ms +[2025-09-05 22:12:49] [Rank 0] step:9601/10000 train_time:396364ms step_avg:41.28ms +[2025-09-05 22:12:49] [Rank 0] step:9601/10000 train_time:396364ms step_avg:41.28ms +[2025-09-05 22:12:49] [Rank 0] step:9621/10000 train_time:397104ms step_avg:41.27ms +[2025-09-05 22:12:49] 
[Rank 0] step:9621/10000 train_time:397104ms step_avg:41.27ms +[2025-09-05 22:12:50] [Rank 0] step:9641/10000 train_time:397842ms step_avg:41.27ms +[2025-09-05 22:12:50] [Rank 0] step:9641/10000 train_time:397842ms step_avg:41.27ms +[2025-09-05 22:12:51] [Rank 0] step:9661/10000 train_time:398857ms step_avg:41.29ms +[2025-09-05 22:12:51] [Rank 0] step:9661/10000 train_time:398857ms step_avg:41.29ms +[2025-09-05 22:12:52] [Rank 0] step:9681/10000 train_time:399595ms step_avg:41.28ms +[2025-09-05 22:12:52] [Rank 0] step:9681/10000 train_time:399595ms step_avg:41.28ms +[2025-09-05 22:12:52] [Rank 0] step:9701/10000 train_time:400333ms step_avg:41.27ms +[2025-09-05 22:12:52] [Rank 0] step:9701/10000 train_time:400333ms step_avg:41.27ms +[2025-09-05 22:12:53] [Rank 0] step:9721/10000 train_time:401072ms step_avg:41.26ms +[2025-09-05 22:12:53] [Rank 0] step:9721/10000 train_time:401072ms step_avg:41.26ms +[2025-09-05 22:12:54] [Rank 0] step:9741/10000 train_time:401810ms step_avg:41.25ms +[2025-09-05 22:12:54] [Rank 0] step:9741/10000 train_time:401810ms step_avg:41.25ms +[2025-09-05 22:12:55] [Rank 0] step:9761/10000 train_time:402549ms step_avg:41.24ms +[2025-09-05 22:12:55] [Rank 0] step:9761/10000 train_time:402549ms step_avg:41.24ms +[2025-09-05 22:12:55] [Rank 0] step:9781/10000 train_time:403288ms step_avg:41.23ms +[2025-09-05 22:12:55] [Rank 0] step:9781/10000 train_time:403288ms step_avg:41.23ms +[2025-09-05 22:12:56] [Rank 0] step:9801/10000 train_time:404027ms step_avg:41.22ms +[2025-09-05 22:12:56] [Rank 0] step:9801/10000 train_time:404027ms step_avg:41.22ms +[2025-09-05 22:12:57] [Rank 0] step:9821/10000 train_time:404765ms step_avg:41.21ms +[2025-09-05 22:12:57] [Rank 0] step:9821/10000 train_time:404765ms step_avg:41.21ms +[2025-09-05 22:12:58] [Rank 0] step:9841/10000 train_time:405504ms step_avg:41.21ms +[2025-09-05 22:12:58] [Rank 0] step:9841/10000 train_time:405504ms step_avg:41.21ms +[2025-09-05 22:12:58] [Rank 0] step:9861/10000 train_time:406243ms step_avg:41.20ms +[2025-09-05 22:12:58] [Rank 0] step:9861/10000 train_time:406243ms step_avg:41.20ms +[2025-09-05 22:12:59] [Rank 0] step:9881/10000 train_time:406983ms step_avg:41.19ms +[2025-09-05 22:12:59] [Rank 0] step:9881/10000 train_time:406983ms step_avg:41.19ms +[2025-09-05 22:13:00] [Rank 0] step:9901/10000 train_time:407722ms step_avg:41.18ms +[2025-09-05 22:13:00] [Rank 0] step:9901/10000 train_time:407722ms step_avg:41.18ms +[2025-09-05 22:13:01] [Rank 0] step:9921/10000 train_time:408461ms step_avg:41.17ms +[2025-09-05 22:13:01] [Rank 0] step:9921/10000 train_time:408461ms step_avg:41.17ms +[2025-09-05 22:13:01] [Rank 0] step:9941/10000 train_time:409200ms step_avg:41.16ms +[2025-09-05 22:13:01] [Rank 0] step:9941/10000 train_time:409200ms step_avg:41.16ms +[2025-09-05 22:13:02] [Rank 0] step:9961/10000 train_time:409939ms step_avg:41.15ms +[2025-09-05 22:13:02] [Rank 0] step:9961/10000 train_time:409939ms step_avg:41.15ms +[2025-09-05 22:13:03] [Rank 0] step:9981/10000 train_time:410678ms step_avg:41.15ms +[2025-09-05 22:13:03] [Rank 0] step:9981/10000 train_time:410678ms step_avg:41.15ms +[2025-09-05 22:13:04] [Rank 0] step:10000/10000 train_time:411381ms step_avg:41.14ms +[2025-09-05 22:13:04] [Rank 0] step:10000/10000 train_time:411381ms step_avg:41.14ms +[2025-09-05 22:13:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 22:13:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 22:13:04] [Rank 0] PRINT: step:10000/10000 train_loss:2.0222 val_loss:2.0108 train_time:411505ms step_avg:41.15ms +[2025-09-05 22:13:04] [Rank 0] PRINT: step:10000/10000 train_loss:2.0222 val_loss:2.0108 train_time:411505ms step_avg:41.15ms +[2025-09-05 22:13:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:13:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:13:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:13:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:14:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:14:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:14:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:14:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:14:26] [Rank 0] Total Loss: 4.5144 +[2025-09-05 22:14:26] [Rank 0] Total Loss: 4.5144 +[2025-09-05 22:14:26] [Rank 0] Total FTA (Unweighted): 0.3575 +[2025-09-05 22:14:26] [Rank 0] Total FTA (Unweighted): 0.3575 +[2025-09-05 22:14:26] [Rank 0] Total FTA (Weighted): 0.3575 +[2025-09-05 22:14:26] [Rank 0] Total FTA (Weighted): 0.3575 +[2025-09-05 22:14:26] [Rank 0] Group 0 Loss: 3.2969 +[2025-09-05 22:14:26] [Rank 0] Group 0 Loss: 3.2969 +[2025-09-05 22:14:26] [Rank 0] Group 1 Loss: 3.2709 +[2025-09-05 22:14:26] [Rank 0] Group 1 Loss: 3.2709 +[2025-09-05 22:14:26] [Rank 0] Group 2 Loss: 3.2364 +[2025-09-05 22:14:26] [Rank 0] Group 2 Loss: 3.2364 +[2025-09-05 22:14:26] [Rank 0] Group 3 Loss: 3.5963 +[2025-09-05 22:14:26] [Rank 0] Group 3 Loss: 3.5963 +[2025-09-05 22:14:26] [Rank 0] Group 4 Loss: 3.8799 +[2025-09-05 22:14:26] [Rank 0] Group 4 Loss: 3.8799 +[2025-09-05 22:14:26] [Rank 0] Group 5 Loss: 4.2435 +[2025-09-05 22:14:26] [Rank 0] Group 5 Loss: 4.2435 +[2025-09-05 22:14:26] [Rank 0] Group 6 Loss: 4.5394 +[2025-09-05 22:14:26] [Rank 0] Group 6 Loss: 4.5394 +[2025-09-05 22:14:26] [Rank 0] Group 7 Loss: 4.7050 +[2025-09-05 22:14:26] [Rank 0] Group 7 Loss: 4.7050 +[2025-09-05 22:14:26] [Rank 0] Group 8 Loss: 5.0227 +[2025-09-05 22:14:26] [Rank 0] Group 8 Loss: 5.0227 +[2025-09-05 22:14:26] [Rank 0] Group 9 Loss: 5.1492 +[2025-09-05 22:14:26] [Rank 0] Group 9 Loss: 5.1492 +[2025-09-05 22:14:26] [Rank 0] Group 10 Loss: 5.2280 +[2025-09-05 22:14:26] [Rank 0] Group 10 Loss: 5.2280 +[2025-09-05 22:14:26] [Rank 0] Group 11 Loss: 5.2422 +[2025-09-05 22:14:26] [Rank 0] Group 11 Loss: 5.2422 +[2025-09-05 22:14:26] [Rank 0] Group 12 Loss: 5.1782 +[2025-09-05 22:14:26] [Rank 0] Group 12 Loss: 5.1782 +[2025-09-05 22:14:26] [Rank 0] Group 13 Loss: 5.2017 +[2025-09-05 22:14:26] [Rank 0] Group 13 Loss: 5.2017 +[2025-09-05 22:14:26] [Rank 0] Group 14 Loss: 5.2233 +[2025-09-05 22:14:26] [Rank 0] Group 14 Loss: 5.2233 +[2025-09-05 22:14:26] [Rank 0] Group 15 Loss: 5.2163 +[2025-09-05 22:14:26] [Rank 0] Group 15 Loss: 5.2163 +[2025-09-05 22:14:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:14:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:14:26] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:14:26] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:14:26] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 22:14:26] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 22:14:26] [Rank 0] Group 3 FTA: 0.2000 +[2025-09-05 22:14:26] [Rank 0] Group 3 FTA: 0.2000 +[2025-09-05 22:14:26] [Rank 0] Group 4 FTA: 0.2500 
+[2025-09-05 22:14:26] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:14:26] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 22:14:26] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 22:14:26] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:14:26] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:14:26] [Rank 0] Group 7 FTA: 0.1800 +[2025-09-05 22:14:26] [Rank 0] Group 7 FTA: 0.1800 +[2025-09-05 22:14:26] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 22:14:26] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 22:14:26] [Rank 0] Group 9 FTA: 0.1800 +[2025-09-05 22:14:26] [Rank 0] Group 9 FTA: 0.1800 +[2025-09-05 22:14:26] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:14:26] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:14:27] [Rank 0] Group 11 FTA: 0.2300 +[2025-09-05 22:14:27] [Rank 0] Group 11 FTA: 0.2300 +[2025-09-05 22:14:27] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 22:14:27] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 22:14:27] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 22:14:27] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 22:14:27] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 22:14:27] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 22:14:27] [Rank 0] Group 15 FTA: 0.2000 +[2025-09-05 22:14:27] [Rank 0] Group 15 FTA: 0.2000 +[2025-09-05 22:14:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:14:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_loss_curves.png +[2025-09-05 22:14:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:14:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/per_class_acc_curves.png +[2025-09-05 22:14:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:14:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_loss_curve.png +[2025-09-05 22:14:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:14:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_44/total_acc_curve.png +[2025-09-05 22:14:28] [Rank 0] step:10001/10000 train_time:411514ms step_avg:41.15ms +[2025-09-05 22:14:28] [Rank 0] step:10001/10000 train_time:411514ms step_avg:41.15ms +[2025-09-05 22:14:28] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 22:14:28 2025 --- +[2025-09-05 22:14:28] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 22:14:28 2025 --- +[2025-09-05 22:14:28] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB +[2025-09-05 22:14:28] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..50b21700ebd2adb431cba87bc906131df386714f --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 
45, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.1, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "3c8516d4-67a0-4bc8-b9b0-aabaa5adb900", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..5331d6e6ae6f6288b9114b034030dcfa7ec4b651 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de7d7d318231297d0f568f40f27edde5d214c69aa1750c9a9443487e3e2fc78e +size 340331 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..2d209746c7bb3ead037a6d0f3dc93a8eb0c28eb5 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ecf10b76dccaf94f3ca58fa46437724b4a0ca1477f4de5b35cdc72796cf6e3b +size 410796 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..65cd9801dd091225eb3665cc5ad5010c0a5392db --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:497cd252c9ba139c3d3ee3a089098bc30898ef0a2ca1bff34ff0aa48b3b87ffa +size 91114 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..2e714ac26a334438ec800755882e72255ac23ec4 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eb0f8c4c3f4a0d0cb0c976e65e52416acf2fc0a4d95bba8aa426e7007cf2bf2 +size 115826 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/training_log_3c8516d4-67a0-4bc8-b9b0-aabaa5adb900.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/training_log_3c8516d4-67a0-4bc8-b9b0-aabaa5adb900.txt new file mode 100644 index 0000000000000000000000000000000000000000..a09a8e06ce85d39f99837dbe3a9ccd9ef322795a --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/training_log_3c8516d4-67a0-4bc8-b9b0-aabaa5adb900.txt @@ -0,0 +1,5614 @@ +[2025-09-05 22:14:53] [Rank 0] PRINT: --- Script Start: Fri Sep 5 22:14:53 2025 --- +[2025-09-05 22:14:53] [Rank 0] PRINT: --- Script Start: Fri Sep 5 22:14:53 2025 --- +[2025-09-05 22:14:53] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 22:14:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 22:14:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 22:14:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 22:14:53] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-05 22:14:53] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-05 22:14:53] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45 +[2025-09-05 22:14:53] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45 +[2025-09-05 22:14:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
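+                         # modes 9-16 (implemented in the branch logic further below):
+                         " 9: SGD+momentum on ALL params, no Muon/Adam (uses --sgd_lr). "
+                         "10: Muon(O Attn, MLP)/Adam(QK+V Attn). "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QK+V Attn, W_1 MLP). "
+                         "14: Muon(O Attn)/Adam(QK+V Attn, MLP). "
+                         "15: Muon(V Attn)/Adam(QK+O Attn, MLP). "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."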
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Guarded single write: each message goes to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
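+    If num_samples is given, a stratified per-class subsample of roughly that
+    size is scored instead of the full QA set (see the sampling block below).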
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
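+    # Quick reference for the matrix splits above/below (embed, head and scalar
+    # params always go to Adam, except mode 9 where plain SGD owns everything):
+    #   Muon gets -> 0: QKVO+MLP | 1: QK | 2: VO | 3: QKVO | 4: MLP | 5: none
+    #                6: W_2 | 7: VO+MLP | 8: VO+W_2 | 10: O+MLP | 13: O+W_2
+    #                14: O | 15: V | 16: QKV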
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
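+    # Note: under the gated parameterization, mlp_w1_group = c_fc + c_up weights,
+    # so the modes that split W_1 from W_2 (6, 8, 13) send both the fc and the
+    # gate/up projections to Adam while Muon keeps c_proj (W_2).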
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # use the CLI Muon LR directly (the muon_lr local is only set in the qkvo branch)
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 22:14:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ if torch.cuda.is_available():
+ torch.cuda.manual_seed_all(seed)
+ print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+ header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+ assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+ assert header[1] == 1, "unsupported version"
+ num_tokens = int(header[2])
+ with file.open("rb", buffering=0) as f:
+ tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+ f.seek(256 * 4)
+ nbytes = f.readinto(tokens.numpy())
+ assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+ return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+ files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+ assert batch_size % world_size == 0
+ local_batch_size = batch_size // world_size
+ file_iter = cycle(files) # cycle through the shards so the loader supports multi-epoch training
+ tokens, pos = _load_data_shard(next(file_iter)), 0
+ while True:
+ if pos + batch_size + 1 >= len(tokens):
+ tokens, pos = _load_data_shard(next(file_iter)), 0
+ buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+ inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+ targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+ pos += batch_size
+ yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+ help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+ dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+# run_id = uuid.uuid4()
+# os.makedirs(log_dir, exist_ok=True) # Create new log directory
+# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+# print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+ # Set seed again specifically for master process for operations like dir creation, config saving
+ set_seed(exp_args.seed)
+
+ # Construct folder name based on config and seed
+ # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+ run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+ # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+ # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+ run_dir_path = base_log_dir / run_folder_name
+ run_dir_path.mkdir(parents=True, exist_ok=True)
+ run_dir_path_str = str(run_dir_path)
+
+ run_uuid = uuid.uuid4()
+ logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+ print(f"Logging to: {logfile}")
+
+ # Save configuration
+ config_to_save = {
+ "cli_args": vars(exp_args),
+ "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+ "run_uuid_for_log": str(run_uuid),
+ "script_code_logged_at_start": True
+ }
+ config_file_path = run_dir_path / "config.json"
+ with open(config_file_path, "w") as f:
+ json.dump(config_to_save, f, indent=4)
+ print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+ if master_process:
+ # Add timestamp and rank for better log readability
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+ log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+ # Print to console if requested or if it's a specific "PRINT:" message
+ if console or s.startswith("PRINT:"):
+ actual_s = s[6:] if s.startswith("PRINT:") else s
+ print(actual_s) # Print to stdout for master process
+
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+ print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+ """Construct class sample counts to match the paper's distribution."""
+ selection_counts = {}
+ class_groups = []
+ class_id = 0
+ for group_id in range(m + 1):
+ if group_id == 0: num_classes = 1
+ else: num_classes = 2 ** (group_id - 1)
+ samples_per_class = 2 ** (m - group_id)
+ if samples_per_class < 1: continue
+ for _ in range(num_classes):
+ selection_counts[class_id] = samples_per_class
+ class_groups.append(group_id)
+ class_id += 1
+ return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+ """
+ In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+ """
+ print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+ model.eval()
+
+ # 1. Load and sample data
+ #with open(qa_data_path, 'r', encoding='utf-8') as f:
+ # qa_data = [json.loads(line) for line in f]
+
+ #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+ # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+ # data_by_class = defaultdict(list)
+ # for item in qa_data: data_by_class[item['class_id']].append(item)
+ # sample_ratio = num_samples / len(qa_data)
+ # stratified_sample_data = []
+ # for class_id, items in data_by_class.items():
+ # num_to_sample = max(1, int(len(items) * sample_ratio))
+ # sampled_items = random.sample(items, min(len(items), num_to_sample))
+ # stratified_sample_data.extend(sampled_items)
+ # qa_data = stratified_sample_data
+ # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+ qa_data = []
+ if fixed_indices is not None:
+ needed = set()
+ for arr in fixed_indices.values():
+ needed.update(arr)
+ with open(qa_data_path, 'r', encoding='utf-8') as f:
+ for idx, line in enumerate(f):
+ if idx in needed:
+ try:
+ qa_data.append(json.loads(line))
+ except Exception:
+ continue
+ print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+ else:
+ with open(qa_data_path, 'r', encoding='utf-8') as f:
+ qa_data = [json.loads(line) for line in f]
+ print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+ # 2. Initialize counters
+ group_losses = defaultdict(float)
+ group_loss_counts = defaultdict(int) # For loss sample count
+ group_correct = defaultdict(int)
+ group_total_fta = defaultdict(int) # For FTA sample count
+
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+ avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+ avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+ total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+
+ # Two methods for calculating total accuracy
+ total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples
+ total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups
+
+ print0("--- Detailed Evaluation Complete ---", console=True)
+ return {
+ 'per_class_loss': avg_group_loss,
+ 'per_class_acc': avg_group_acc,
+ 'total_loss': total_loss,
+ 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy
+ 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups
+ 'total_acc': total_acc_unweighted # Primarily use simple average method
+ }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+ """Plot either per-group curves (nested dict history) or a single total curve (flat dict history)."""
+ plt.style.use('seaborn-v0_8-whitegrid')
+ fig, ax = plt.subplots(figsize=(8, 6))
+ if not history:
+ print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+ plt.close()
+ return
+
+ is_per_class = isinstance(next(iter(history.values())), dict)
+
+ if is_per_class:
+ group_ids = sorted([int(g) for g in history.keys()])
+ cmap = plt.get_cmap("viridis")
+ norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+ for group_id_int in group_ids:
+ group_id_str = str(group_id_int)
+ epoch_data = history[group_id_str]
+ epochs = sorted([int(e) for e in epoch_data.keys()])
+ values = [epoch_data[str(e)] for e in epochs]
+ ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+ ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+ else:
+ epochs = sorted([int(e) for e in history.keys()])
+ values = [history[str(e)] for e in epochs]
+ ax.plot(epochs, values, linewidth=2.5)
+
+ ax.set_xlabel("Epoch", fontsize=14)
+ ax.set_ylabel(y_label, fontsize=14)
+ ax.set_title(title, fontsize=16)
+ ax.tick_params(axis='both', which='major', labelsize=12)
+
+ if y_lim:
+ ax.set_ylim(y_lim)
+ else:
+ all_values = []
+ if is_per_class:
+ for group_data in history.values(): all_values.extend(group_data.values())
+ else:
+ all_values = list(history.values())
+ if all_values:
+ min_val, max_val = min(all_values), max(all_values)
+ ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+ ax.grid(True)
+ plt.tight_layout()
+ plt.savefig(output_path, dpi=300)
+ print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+ plt.close()
+
+
+
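+# plot_curves accepts either history shape built in the training loop below
+# (shown here as an illustrative example, not data from this run):
+# per-class: {"0": {"500": 3.21, "1000": 2.87}, ...} # group id -> step -> value
+# total: {"500": 3.05, "1000": 2.71} # step -> value
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+ """
+ Internal evaluation on original QA data for per-class loss. 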
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_qk_group
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_attn_matrices
+ adam_matrix_target_list = all_mlp_matrices
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_mlp_matrices
+ adam_matrix_target_list = all_attn_matrices
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = []
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = mlp_w2_group
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
+ adam_matrix_target_list = attn_qk_group
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
+ elif current_optimizer_mode == 9: # sgd + momentum
+ # This mode uses SGD with momentum for all parameters, no Muon or Adam
+ print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+ all_params = list(model.parameters())
+ sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+ optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+ optimizer2 = None
+ optimizers = [optimizer1]
+ print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + all_mlp_matrices
+ adam_matrix_target_list = attn_v_params + attn_qk_group
+ elif current_optimizer_mode == 13:
+ print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+ elif current_optimizer_mode == 14:
+ print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params
+ adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+ elif current_optimizer_mode == 15:
+ print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params
+ adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+ elif current_optimizer_mode == 16:
+ print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params + attn_qk_group
+ adam_matrix_target_list = attn_o_params + all_mlp_matrices
+ else:
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+ # Skip Adam and Muon setup for SGD mode (9)
+ if current_optimizer_mode != 9:
+ # Adam optimizer setup
+ adam_param_groups_config = [
+ #dict(params=head_params, lr=0.22),
+ #dict(params=embed_params, lr=0.6),
+ #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+ dict(params=head_params, lr=exp_args.adam_lr),
+ dict(params=embed_params, lr=exp_args.adam_lr),
+ dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+ ]
+ # Add matrices specifically assigned to Adam for this experiment mode
+ if adam_matrix_target_list:
+ # Ensure adam_matrix_target_list is flat and contains Parameters
+ flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+ if flat_adam_matrices: # Only add group if there are params
+ adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+ # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+ adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+ optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+ optimizers = [optimizer1] # Start with Adam
+
+ # Muon optimizer setup
+ if muon_params_target_list:
+ # Ensure muon_params_target_list is flat, unique, and contains Parameters
+ flat_unique_muon_params = []
+ seen_muon_ids = set()
+ for sublist_or_p in muon_params_target_list:
+ for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+ if p is not None and id(p) not in seen_muon_ids:
+ flat_unique_muon_params.append(p)
+ seen_muon_ids.add(id(p))
+
+ if flat_unique_muon_params: # Only create Muon if it has parameters
+ optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+ optimizers.append(optimizer2)
+ else:
+ print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+ optimizer2 = None # Explicitly set to None if not created
+ else:
+ print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+ optimizer2 = None # Explicitly set to None
+
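+ # Illustrative sanity check (a sketch, not part of the original run): the Muon
+ # and Adam parameter sets are meant to be disjoint and jointly cover the model.
+ # muon_ids = {id(p) for p in flat_unique_muon_params} if optimizer2 else set()
+ # adam_ids = {id(p) for g in optimizer1.param_groups for p in g["params"]}
+ # assert muon_ids.isdisjoint(adam_ids), "a parameter was assigned to both optimizers"
+ print0(f"PRINT: Optimizers configured. 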
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_qk_group
+ adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+ elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+ print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group
+ adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+ elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+ print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_attn_matrices
+ adam_matrix_target_list = all_mlp_matrices
+ elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+ print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = all_mlp_matrices
+ adam_matrix_target_list = all_attn_matrices
+ elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+ print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = []
+ adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+ elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+ print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = mlp_w2_group
+ adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+ elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+ print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + all_mlp_matrices
+ adam_matrix_target_list = attn_qk_group
+ elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+ print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_vo_group + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + mlp_w1_group
+ elif current_optimizer_mode == 9: # sgd + momentum
+ # This mode uses SGD with momentum for all parameters, no Muon or Adam
+ print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+ all_params = list(model.parameters())
+ sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+ optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+ optimizer2 = None
+ optimizers = [optimizer1]
+ elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+ print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + all_mlp_matrices
+ adam_matrix_target_list = attn_v_params + attn_qk_group
+ elif current_optimizer_mode == 13:
+ print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params + mlp_w2_group
+ adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+ elif current_optimizer_mode == 14:
+ print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_o_params
+ adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+ elif current_optimizer_mode == 15:
+ print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params
+ adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+ elif current_optimizer_mode == 16:
+ print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+ muon_params_target_list = attn_v_params + attn_qk_group
+ adam_matrix_target_list = attn_o_params + all_mlp_matrices
+ else:
+ raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+ # Skip Adam and Muon setup for SGD mode (9)
+ if current_optimizer_mode != 9:
+ # Adam optimizer setup
+ adam_param_groups_config = [
+ #dict(params=head_params, lr=0.22),
+ #dict(params=embed_params, lr=0.6),
+ #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+ dict(params=head_params, lr=exp_args.adam_lr),
+ dict(params=embed_params, lr=exp_args.adam_lr),
+ dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+ ]
+ # Add matrices specifically assigned to Adam for this experiment mode
+ if adam_matrix_target_list:
+ # Ensure adam_matrix_target_list is flat and contains Parameters
+ flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+ if flat_adam_matrices: # Only add group if there are params
+ adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+ # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+ adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+ optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+ optimizers = [optimizer1] # Start with Adam
+
+ # Muon optimizer setup
+ if muon_params_target_list:
+ # Ensure muon_params_target_list is flat, unique, and contains Parameters
+ flat_unique_muon_params = []
+ seen_muon_ids = set()
+ for sublist_or_p in muon_params_target_list:
+ for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+ if p is not None and id(p) not in seen_muon_ids:
+ flat_unique_muon_params.append(p)
+ seen_muon_ids.add(id(p))
+
+ if flat_unique_muon_params: # Only create Muon if it has parameters
+ optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+ optimizers.append(optimizer2)
+ else:
+ print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+ optimizer2 = None # Explicitly set to None if not created
+ else:
+ print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+ optimizer2 = None # Explicitly set to None
+
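+ # Note for the gated parameterization: mlp_w1_group bundles both input projections
+ # (c_fc and c_up) of the gated MLP as "W_1", while mlp_w2_group is the down
+ # projection c_proj ("W_2"); modes 6, 8 and 13 split Muon/Adam along that line.
+ print0(f"PRINT: Optimizers configured. 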
Total optimizers: {len(optimizers)}", console=True)
+ if optimizer2:
+ print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+ # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+ hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+ embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+ scalar_params = [p for p in model.parameters() if p.ndim < 2]
+ head_params = [model.lm_head.weight]
+
+ # init the optimizer(s)
+ adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+ # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+ # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+ optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+ optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+ optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+ for group in opt.param_groups:
+ group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+ # e.g. with num_iterations=10000 and cooldown_frac=0.8: the multiplier is 1.0 up to
+ # step 2000, then decays linearly to 0.1 at step 10000 (e.g. 0.55 at step 6000).
+ x = step / args.num_iterations # progress in training
+ # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+ # --- MODIFICATION: Adjust assert for LR schedule ---
+ if not (0 <= x <= 1): # Allow x=1 for the last step
+ x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+ # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+ if x < 1 - args.cooldown_frac:
+ return 1.0
+ else:
+ # Ensure cooldown_frac is not zero to avoid division by zero
+ w = (1 - x) / max(args.cooldown_frac, 1e-9)
+ return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+ return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+ return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+ x = step / args.num_iterations # progress in training
+ # --- MODIFICATION: Adjust assert for window size schedule ---
+ if not (0 <= x <= 1):
+ x = min(max(x, 0.0), 1.0) # Clamp x
+
+ # Ensure window_size is at least 128
+ window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+ return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the eager 'model'; the compiled wrapper is kept separately as 'model_compiled'.
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+ model=copy.deepcopy(model_compiled.state_dict()),
+ optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+ inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+ loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+ loss.backward()
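+ # Manual gradient averaging in lieu of DDP's bucketed all-reduce; every rank must
+ # execute these collectives in the same order for the warmup to stay in lockstep.
+ for param in 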
model_compiled.parameters():
+ if param.grad is not None:
+ dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+ # Add gradient clipping for SGD mode in warmup too
+ if exp_args.optimizer_mode == 9:
+ torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+ for opt in optimizers:
+ opt.step()
+ model_compiled.zero_grad(set_to_none=True)
+ model_compiled.load_state_dict(initial_state["model"])
+ for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+ opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+
+if master_process:
+ tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+ history = {
+ 'per_class_loss': defaultdict(dict),
+ 'per_class_acc': defaultdict(dict),
+ 'total_loss': {},
+ 'total_acc': {}
+ }
+
+
+ # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+ FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+ #PER_GROUP_K = 100 # Number of samples per group
+
+ def _is_valid_qa_text_for_fta(text: str) -> bool:
+ # Quick filter for the fixed eval set: keep only texts that parse as a question ("?") followed by "Answer:"
+ if not isinstance(text, str):
+ return False
+ return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+ def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+ rng = random.Random(seed)
+ # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+ buckets = defaultdict(list) # gid -> [line_idx, ...]
+ with open(jsonl_path, "r", encoding="utf-8") as f:
+ for i, line in enumerate(f):
+ try:
+ item = json.loads(line)
+ except Exception:
+ continue
+ gid = class_to_group_map.get(item.get("class_id"))
+ if gid is None:
+ continue
+ if not _is_valid_qa_text_for_fta(item.get("text", "")):
+ continue
+ buckets[gid].append(i)
+
+ fixed = {}
+ for gid, arr in buckets.items():
+ if len(arr) <= per_group_k:
+ fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+ else:
+ fixed[str(gid)] = rng.sample(arr, per_group_k)
+ return fixed
+
+ # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+ selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+ class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+ if not FIXED_VAL_INDEX_PATH.exists():
+ fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+ with open(FIXED_VAL_INDEX_PATH, "w") as f:
+ json.dump(fixed_idx, f)
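+ # The saved file maps group id -> list of line indices into the QA jsonl,
+ # at most PER_GROUP_K per group, e.g. {"<group_id>": [line_idx, ...], ...}
+ # (this is the fixed_eval_indices.json written to the run directory).
+ print0(f"PRINT: Built fixed eval set. 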
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Momentum warmup: ramp linearly from 0.85 to 0.95 over the first 300 steps
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Computed for reference; not included in the printed line below
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
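Note on the schedule logic above: each step, every param group's lr is recomputed as initial_lr * get_lr(step), so get_lr returns a scale factor rather than an absolute rate; and when optimizer2 exists (it is None in this mode-9 run, which configures a single optimizer) its momentum ramps linearly from 0.85 to 0.95 over the first 300 steps. A standalone sketch of that momentum ramp, constants taken from the code above:

    def momentum_at(step, warmup_steps=300, m0=0.85, m1=0.95):
        frac = min(step / warmup_steps, 1)   # same interpolation as the training loop
        return (1 - frac) * m0 + frac * m1

    for s in (0, 150, 300, 10000):
        print(s, round(momentum_at(s), 3))   # 0.85, 0.9, 0.95, 0.95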
+[2025-09-05 22:14:53] [Rank 0] PRINT: Constructing model...
+[2025-09-05 22:14:54] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 22:14:54] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 22:14:54] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 22:14:58] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 22:14:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 22:14:58] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 22:14:58] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 22:14:58] [Rank 0] PRINT: Model returns:
+[2025-09-05 22:14:58] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 22:14:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 22:14:58] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.1).
+[2025-09-05 22:14:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 22:14:58] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 22:15:03] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 22:15:03] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 22:15:43] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 22:15:43] [Rank 0] PRINT: Starting training...
+[2025-09-05 22:15:49] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/fixed_eval_indices.json
+[2025-09-05 22:15:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:15:53] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 22:16:27] [Rank 0] step:21/10000 train_time:33806ms step_avg:1609.82ms
+[2025-09-05 22:16:28] [Rank 0] step:41/10000 train_time:34535ms step_avg:842.32ms
+[2025-09-05 22:16:28] [Rank 0] step:61/10000 train_time:35263ms step_avg:578.08ms
+[2025-09-05 22:16:29] [Rank 0] step:81/10000 train_time:35990ms step_avg:444.31ms
+[2025-09-05 22:16:30] [Rank 0] step:101/10000 train_time:36718ms step_avg:363.54ms
+[2025-09-05 22:16:30] [Rank 0] step:121/10000 train_time:37445ms step_avg:309.46ms
+[2025-09-05 22:16:31] [Rank 0] step:141/10000 train_time:38172ms step_avg:270.72ms
+[2025-09-05 22:16:32] [Rank 0] step:161/10000 train_time:38900ms step_avg:241.61ms
+[2025-09-05 22:16:33] [Rank 0] step:181/10000 train_time:39628ms step_avg:218.94ms
+[2025-09-05 22:16:33] [Rank 0] step:201/10000 train_time:40356ms step_avg:200.77ms
+[2025-09-05 22:16:34] [Rank 0] step:221/10000 train_time:41083ms step_avg:185.90ms
+[2025-09-05 22:16:35] [Rank 0] step:241/10000 train_time:41809ms step_avg:173.48ms
+[2025-09-05 22:16:36] [Rank 0] step:261/10000 train_time:42536ms step_avg:162.97ms
+[2025-09-05 22:16:36] [Rank 0] step:281/10000 train_time:43263ms step_avg:153.96ms
+[2025-09-05 22:16:37] [Rank 0] step:301/10000 train_time:43992ms step_avg:146.15ms
+[2025-09-05 22:16:38] [Rank 0] step:321/10000 train_time:44852ms step_avg:139.73ms
+[2025-09-05 22:16:39] [Rank 0] step:341/10000 train_time:45580ms step_avg:133.66ms
+[2025-09-05 22:16:39] [Rank 0] step:361/10000 train_time:46308ms step_avg:128.28ms
+[2025-09-05 22:16:40] [Rank 0] step:381/10000 train_time:47036ms step_avg:123.45ms
+[2025-09-05 22:16:41] [Rank 0] step:401/10000 train_time:47988ms step_avg:119.67ms
+[2025-09-05 22:16:42] [Rank 0] step:421/10000 train_time:48715ms step_avg:115.71ms
+[2025-09-05 22:16:42] [Rank 0] step:441/10000 train_time:49447ms step_avg:112.12ms
+[2025-09-05 22:16:43] [Rank 0] step:461/10000 train_time:50173ms step_avg:108.84ms
+[2025-09-05 22:16:44] [Rank 0] step:481/10000 train_time:50902ms step_avg:105.83ms
+[2025-09-05 22:16:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:16:45] [Rank 0] PRINT: step:500/10000 train_loss:5.6149 val_loss:4.0147 train_time:51709ms step_avg:103.42ms
+[2025-09-05 22:16:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:16:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:18:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:18:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:18:06] [Rank 0] Total Loss: 6.0284
+[2025-09-05 22:18:06] [Rank 0] Total FTA (Unweighted): 0.0844
+[2025-09-05 22:18:06] [Rank 0] Total FTA (Weighted): 0.0844
+[2025-09-05 22:18:06] [Rank 0] Group 0 Loss: 3.7971
+[2025-09-05 22:18:06] [Rank 0] Group 1 Loss: 3.8126
+[2025-09-05 22:18:06] [Rank 0] Group 2 Loss: 4.5638
+[2025-09-05 22:18:06] [Rank 0] Group 3 Loss: 5.4243
+[2025-09-05 22:18:06] [Rank 0] Group 4 Loss: 6.2013
+[2025-09-05 22:18:06] [Rank 0] Group 5 Loss: 6.3748
+[2025-09-05 22:18:06] [Rank 0] Group 6 Loss: 6.4713
+[2025-09-05 22:18:06] [Rank 0] Group 7 Loss: 6.4534
+[2025-09-05 22:18:06] [Rank 0] Group 8 Loss: 6.5962
+[2025-09-05 22:18:06] [Rank 0] Group 9 Loss: 6.7248
+[2025-09-05 22:18:06] [Rank 0] Group 10 Loss: 6.7286
+[2025-09-05 22:18:06] [Rank 0] Group 11 Loss: 6.7857
+[2025-09-05 22:18:06] [Rank 0] Group 12 Loss: 6.5978
+[2025-09-05 22:18:06] [Rank 0] Group 13 Loss: 6.5865
+[2025-09-05 22:18:06] [Rank 0] Group 14 Loss: 6.7188
+[2025-09-05 22:18:06] [Rank 0] Group 15 Loss: 6.6172
+[2025-09-05 22:18:07] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 22:18:07] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 22:18:07] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-05 22:18:07] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-05 22:18:07] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-05 22:18:07] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-05 22:18:07] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 22:18:07] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-05 22:18:07] [Rank 0] Group 8 FTA: 0.1200
+[2025-09-05 22:18:07] [Rank 0] Group 9 FTA: 0.0800
+[2025-09-05 22:18:07] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-05 22:18:07] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 22:18:07] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 22:18:07] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 22:18:07] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 22:18:07] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 22:18:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:18:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:18:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:18:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:18:08] [Rank 0] step:501/10000 train_time:51718ms step_avg:103.23ms
+[2025-09-05 22:18:09] [Rank 0] step:521/10000 train_time:52372ms step_avg:100.52ms
+[2025-09-05 22:18:10] [Rank 0] step:541/10000 train_time:53098ms step_avg:98.15ms
+[2025-09-05 22:18:10] [Rank 0] step:561/10000 train_time:53825ms step_avg:95.94ms
+[2025-09-05 22:18:11] [Rank 0] step:581/10000 train_time:54552ms step_avg:93.89ms
+[2025-09-05 22:18:12] [Rank 0] step:601/10000 train_time:55281ms step_avg:91.98ms
+[2025-09-05 22:18:13] [Rank 0] step:621/10000 train_time:56009ms step_avg:90.19ms
+[2025-09-05 22:18:13] [Rank 0] step:641/10000 train_time:56737ms step_avg:88.51ms
+[2025-09-05 22:18:14] [Rank 0] step:661/10000 train_time:57464ms step_avg:86.94ms
+[2025-09-05 22:18:15] [Rank 0] step:681/10000 train_time:58192ms step_avg:85.45ms
+[2025-09-05 22:18:16] [Rank 0] step:701/10000 train_time:58919ms step_avg:84.05ms
+[2025-09-05 22:18:16] [Rank 0] step:721/10000 train_time:59651ms step_avg:82.73ms
+[2025-09-05 22:18:17] [Rank 0] step:741/10000 train_time:60379ms step_avg:81.48ms
+[2025-09-05 22:18:18] [Rank 0] step:761/10000 train_time:61111ms step_avg:80.30ms
+[2025-09-05 22:18:19] [Rank 0] step:781/10000 train_time:61845ms step_avg:79.19ms
+[2025-09-05 22:18:19] [Rank 0] step:801/10000 train_time:62577ms step_avg:78.12ms
+[2025-09-05 22:18:21] [Rank 0] step:821/10000 train_time:63926ms step_avg:77.86ms
+[2025-09-05 22:18:21] [Rank 0] step:841/10000 train_time:64659ms step_avg:76.88ms
+[2025-09-05 22:18:22] [Rank 0] step:861/10000 train_time:65392ms step_avg:75.95ms
+[2025-09-05 22:18:23] [Rank 0] step:881/10000 train_time:66123ms step_avg:75.05ms
+[2025-09-05 22:18:24] [Rank 0] step:901/10000 train_time:66855ms step_avg:74.20ms
+[2025-09-05 22:18:24] [Rank 0] step:921/10000 train_time:67587ms step_avg:73.38ms
+[2025-09-05 22:18:25] [Rank 0] step:941/10000 train_time:68319ms step_avg:72.60ms
+[2025-09-05 22:18:26] [Rank 0] step:961/10000 train_time:69052ms step_avg:71.85ms
+[2025-09-05 22:18:26] [Rank 0] step:981/10000 train_time:69784ms step_avg:71.14ms
+[2025-09-05 22:18:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:18:28] [Rank 0] PRINT: step:1000/10000 train_loss:3.6021 val_loss:3.3059 train_time:70597ms step_avg:70.60ms
+[2025-09-05 22:18:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:18:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:19:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:19:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:19:49] [Rank 0] Total Loss: 5.6609
+[2025-09-05 22:19:49] [Rank 0] Total FTA (Unweighted): 0.1281
+[2025-09-05 22:19:49] [Rank 0] Total FTA (Weighted): 0.1281
+[2025-09-05 22:19:49] [Rank 0] Group 0 Loss: 3.6386
+[2025-09-05 22:19:49] [Rank 0] Group 1 Loss: 3.5769
+[2025-09-05 22:19:49] [Rank 0] Group 2 Loss: 3.8863
+[2025-09-05 22:19:49] [Rank 0] Group 3 Loss: 4.6100
+[2025-09-05 22:19:49] [Rank 0] Group 4 Loss: 5.4938
+[2025-09-05 22:19:49] [Rank 0] Group 5 Loss: 5.8496
+[2025-09-05 22:19:49] [Rank 0] Group 6 Loss: 6.0839
+[2025-09-05 22:19:49] [Rank 0] Group 7 Loss: 6.1161
+[2025-09-05 22:19:49] [Rank 0] Group 8 Loss: 6.3305
+[2025-09-05 22:19:49] [Rank 0] Group 9 Loss: 6.4543
+[2025-09-05 22:19:49] [Rank 0] Group 10 Loss: 6.4831
+[2025-09-05 22:19:49] [Rank 0] Group 11 Loss: 6.5432
+[2025-09-05 22:19:49] [Rank 0] Group 12 Loss: 6.3349
+[2025-09-05 22:19:49] [Rank 0] Group 13 Loss: 6.3605
+[2025-09-05 22:19:49] [Rank 0] Group 14 Loss: 6.4614
+[2025-09-05 22:19:49] [Rank 0] Group 15 Loss: 6.3509
+[2025-09-05 22:19:49] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 22:19:49] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 22:19:49] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 22:19:49] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 22:19:49] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-05 22:19:49] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-05 22:19:49] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 22:19:49] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 22:19:49] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-05 22:19:49] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 22:19:49] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-05 22:19:49] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 22:19:49] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 22:19:49] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 22:19:49] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 22:19:49] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 22:19:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:19:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:19:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:19:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:19:51] [Rank 0] step:1001/10000 train_time:70606ms step_avg:70.54ms
+[2025-09-05 22:19:52] [Rank 0] step:1021/10000 train_time:71285ms step_avg:69.82ms
+[2025-09-05 22:19:52] [Rank 0] step:1041/10000 train_time:72017ms step_avg:69.18ms
+[2025-09-05 22:19:53] [Rank 0] step:1061/10000 train_time:72749ms step_avg:68.57ms
+[2025-09-05 22:19:54] [Rank 0] step:1081/10000 train_time:73482ms step_avg:67.98ms
+[2025-09-05 22:19:55] [Rank 0] step:1101/10000 train_time:74215ms step_avg:67.41ms
+[2025-09-05 22:19:55] [Rank 0] step:1121/10000 train_time:74948ms step_avg:66.86ms
+[2025-09-05 22:19:56] [Rank 0] step:1141/10000 train_time:75680ms step_avg:66.33ms
+[2025-09-05 22:19:57] [Rank 0] step:1161/10000 train_time:76413ms step_avg:65.82ms
+[2025-09-05 22:19:57] [Rank 0] step:1181/10000 train_time:77145ms step_avg:65.32ms
+[2025-09-05 22:19:58] [Rank 0] step:1201/10000 train_time:77876ms step_avg:64.84ms
+[2025-09-05 22:19:59] [Rank 0] step:1221/10000 train_time:78608ms step_avg:64.38ms
+[2025-09-05 22:20:00] [Rank 0] step:1241/10000 train_time:79341ms step_avg:63.93ms
+[2025-09-05 22:20:00] [Rank 0] step:1261/10000 train_time:80071ms step_avg:63.50ms
+[2025-09-05 22:20:01] [Rank 0] step:1281/10000 train_time:80803ms step_avg:63.08ms
+[2025-09-05 22:20:02] [Rank 0] step:1301/10000 train_time:81535ms step_avg:62.67ms
+[2025-09-05 22:20:03] [Rank 0] step:1321/10000 train_time:82272ms step_avg:62.28ms
+[2025-09-05 22:20:03] [Rank 0] step:1341/10000 train_time:83004ms step_avg:61.90ms
+[2025-09-05 22:20:04] [Rank 0] step:1361/10000 train_time:83736ms step_avg:61.53ms
+[2025-09-05 22:20:05] [Rank 0] step:1381/10000 train_time:84469ms step_avg:61.16ms
+[2025-09-05 22:20:06] [Rank 0] step:1401/10000 train_time:85202ms step_avg:60.81ms
+[2025-09-05 22:20:06] [Rank 0] step:1421/10000 train_time:85934ms step_avg:60.47ms
+[2025-09-05 22:20:07] [Rank 0] step:1441/10000 train_time:86667ms step_avg:60.14ms
+[2025-09-05 22:20:08] [Rank 0] step:1461/10000 train_time:87399ms step_avg:59.82ms
+[2025-09-05 22:20:08] [Rank 0] step:1481/10000 train_time:88131ms step_avg:59.51ms
+[2025-09-05 22:20:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:20:10] [Rank 0] PRINT: step:1500/10000 train_loss:3.1221 val_loss:2.9565 train_time:88944ms step_avg:59.30ms
+[2025-09-05 22:20:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:20:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:21:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:21:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:21:31] [Rank 0] Total Loss: 5.4076
+[2025-09-05 22:21:31] [Rank 0] Total FTA (Unweighted): 0.1562
+[2025-09-05 22:21:31] [Rank 0] Total FTA (Weighted): 0.1562
+[2025-09-05 22:21:31] [Rank 0] Group 0 Loss: 3.5546
+[2025-09-05 22:21:31] [Rank 0] Group 1 Loss: 3.5262
+[2025-09-05 22:21:31] [Rank 0] Group 2 Loss: 3.7082
+[2025-09-05 22:21:31] [Rank 0] Group 3 Loss: 4.2086
+[2025-09-05 22:21:31] [Rank 0] Group 4 Loss: 5.0153
+[2025-09-05 22:21:31] [Rank 0] Group 5 Loss: 5.5046
+[2025-09-05 22:21:31] [Rank 0] Group 6 Loss: 5.7757
+[2025-09-05 22:21:31] [Rank 0] Group 7 Loss: 5.8366
+[2025-09-05 22:21:31] [Rank 0] Group 8 Loss: 6.0874
+[2025-09-05 22:21:31] [Rank 0] Group 9 Loss: 6.2040
+[2025-09-05 22:21:31] [Rank 0] Group 10 Loss: 6.2356
+[2025-09-05 22:21:31] [Rank 0] Group 11 Loss: 6.2913
+[2025-09-05 22:21:31] [Rank 0] Group 12 Loss: 6.0941
+[2025-09-05 22:21:31] [Rank 0] Group 13 Loss: 6.1375
+[2025-09-05 22:21:31] [Rank 0] Group 14 Loss: 6.1993
+[2025-09-05 22:21:31] [Rank 0] Group 15 Loss: 6.1421
+[2025-09-05 22:21:31] [Rank 0] Group 0 FTA: 0.6800
+[2025-09-05 22:21:31] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 22:21:31] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 22:21:31] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 22:21:31] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-05 22:21:31] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 22:21:31] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 22:21:31] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 22:21:31] [Rank 0] Group 8 FTA: 0.1400
+[2025-09-05 22:21:31] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 22:21:31] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 22:21:31] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 22:21:31] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 22:21:31] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 22:21:31] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 22:21:31] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 22:21:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:21:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:21:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:21:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:21:32] [Rank 0] step:1501/10000 train_time:88953ms step_avg:59.26ms
+[2025-09-05 22:21:33] [Rank 0] step:1521/10000 train_time:89628ms step_avg:58.93ms
+[2025-09-05 22:21:34] [Rank 0] step:1541/10000 train_time:90359ms step_avg:58.64ms
+[2025-09-05 22:21:35] [Rank 0] step:1561/10000 train_time:91091ms step_avg:58.35ms
+[2025-09-05 22:21:35] [Rank 0] step:1581/10000 train_time:91824ms step_avg:58.08ms
+[2025-09-05 22:21:36] [Rank 0] step:1601/10000 train_time:92557ms step_avg:57.81ms
+[2025-09-05 22:21:37] [Rank 0] step:1621/10000 train_time:93289ms step_avg:57.55ms
+[2025-09-05 22:21:38] [Rank 0] step:1641/10000 train_time:94644ms step_avg:57.67ms
+[2025-09-05 22:21:39] [Rank 0] step:1661/10000 train_time:95377ms step_avg:57.42ms
+[2025-09-05 22:21:40] [Rank 0] step:1681/10000 train_time:96110ms step_avg:57.17ms
+[2025-09-05 22:21:40] [Rank 0] step:1701/10000 train_time:96843ms step_avg:56.93ms
+[2025-09-05 22:21:41] [Rank 0] step:1721/10000 train_time:97576ms step_avg:56.70ms
+[2025-09-05 22:21:42] [Rank 0] step:1741/10000 train_time:98308ms step_avg:56.47ms
+[2025-09-05 22:21:43] [Rank 0] step:1761/10000 train_time:99039ms step_avg:56.24ms
+[2025-09-05 22:21:43] [Rank 0] step:1781/10000 train_time:99772ms step_avg:56.02ms
+[2025-09-05 22:21:44] [Rank 0] step:1801/10000 train_time:100504ms step_avg:55.80ms
+[2025-09-05 22:21:45] [Rank 0] step:1821/10000 train_time:101237ms step_avg:55.59ms
+[2025-09-05 22:21:45] [Rank 0] step:1841/10000 train_time:101970ms step_avg:55.39ms
+[2025-09-05 22:21:46] [Rank 0] step:1861/10000 train_time:102703ms step_avg:55.19ms
+[2025-09-05 22:21:47] [Rank 0] step:1881/10000 train_time:103436ms step_avg:54.99ms
+[2025-09-05 22:21:48] [Rank 0] step:1901/10000 train_time:104169ms step_avg:54.80ms
+[2025-09-05 22:21:48] [Rank 0] step:1921/10000 train_time:104902ms step_avg:54.61ms
+[2025-09-05 22:21:49] [Rank 0] step:1941/10000 train_time:105634ms step_avg:54.42ms
+[2025-09-05 22:21:50] [Rank 0] step:1961/10000 train_time:106367ms step_avg:54.24ms
+[2025-09-05 22:21:51] [Rank 0] step:1981/10000 train_time:107098ms step_avg:54.06ms
+[2025-09-05 22:21:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:21:52] [Rank 0] PRINT: step:2000/10000 train_loss:2.8459 val_loss:2.7352 train_time:107911ms step_avg:53.96ms
+[2025-09-05 22:21:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:21:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:23:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:23:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:23:13] [Rank 0] Total Loss: 5.2764
+[2025-09-05 22:23:13] [Rank 0] Total FTA (Unweighted): 0.1806
+[2025-09-05 22:23:13] [Rank 0] Total FTA (Weighted): 0.1806
+[2025-09-05 22:23:13] [Rank 0] Group 0 Loss: 3.4899
+[2025-09-05 22:23:13] [Rank 0] Group 1 Loss: 3.4941
+[2025-09-05 22:23:13] [Rank 0] Group 2 Loss: 3.6596
+[2025-09-05 22:23:13] [Rank 0] Group 3 Loss: 4.1024
+[2025-09-05 22:23:13] [Rank 0] Group 4 Loss: 4.7740
+[2025-09-05 22:23:13] [Rank 0] Group 5 Loss: 5.2789
+[2025-09-05 22:23:13] [Rank 0] Group 6 Loss: 5.5490
+[2025-09-05 22:23:13] [Rank 0] Group 7 Loss: 5.6777
+[2025-09-05 22:23:13] [Rank 0] Group 8 Loss: 5.9321
+[2025-09-05 22:23:13] [Rank 0] Group 9 Loss: 6.0460
+[2025-09-05 22:23:13] [Rank 0] Group 10 Loss: 6.1613
+[2025-09-05 22:23:13] [Rank 0] Group 11 Loss: 6.1759
+[2025-09-05 22:23:13] [Rank 0] Group 12 Loss: 5.9864
+[2025-09-05 22:23:13] [Rank 0] Group 13 Loss: 6.0020
+[2025-09-05 22:23:13] [Rank 0] Group 14 Loss: 6.0883
+[2025-09-05 22:23:13] [Rank 0] Group 15 Loss: 6.0052
+[2025-09-05 22:23:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:23:14] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 22:23:14] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 22:23:14] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 22:23:14] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 22:23:14] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 22:23:14] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 22:23:14] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 22:23:14] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-05 22:23:14] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 22:23:14] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 22:23:14] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 22:23:14] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 22:23:14] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 22:23:14] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 22:23:14] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 22:23:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:23:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:23:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:23:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:23:15] [Rank 0] step:2001/10000 train_time:107920ms step_avg:53.93ms
+[2025-09-05 22:23:16] [Rank 0] step:2021/10000 train_time:108585ms step_avg:53.73ms
+[2025-09-05 22:23:16] [Rank 0] step:2041/10000 train_time:109318ms step_avg:53.56ms
+[2025-09-05 22:23:17] [Rank 0] step:2061/10000 train_time:110050ms step_avg:53.40ms
+[2025-09-05 22:23:18] [Rank 0] step:2081/10000 train_time:110783ms step_avg:53.24ms
+[2025-09-05 22:23:19] [Rank 0] step:2101/10000 train_time:111515ms step_avg:53.08ms
+[2025-09-05 22:23:19] [Rank 0] step:2121/10000 train_time:112247ms step_avg:52.92ms
+[2025-09-05 22:23:20] [Rank 0] step:2141/10000 train_time:112979ms step_avg:52.77ms
+[2025-09-05 22:23:21] [Rank 0] step:2161/10000 train_time:113712ms step_avg:52.62ms
+[2025-09-05 22:23:22] [Rank 0] step:2181/10000 train_time:114445ms step_avg:52.47ms
+[2025-09-05 22:23:22] [Rank 0] step:2201/10000 train_time:115177ms step_avg:52.33ms
+[2025-09-05 22:23:23] [Rank 0] step:2221/10000 train_time:115909ms step_avg:52.19ms
+[2025-09-05 22:23:24] [Rank 0] step:2241/10000 train_time:116646ms step_avg:52.05ms
+[2025-09-05 22:23:25] [Rank 0] step:2261/10000 train_time:117385ms step_avg:51.92ms
+[2025-09-05 22:23:25] [Rank 0] step:2281/10000 train_time:118124ms step_avg:51.79ms
+[2025-09-05 22:23:26] [Rank 0] step:2301/10000 train_time:118863ms step_avg:51.66ms
+[2025-09-05 22:23:27] [Rank 0] step:2321/10000 train_time:119602ms step_avg:51.53ms
+[2025-09-05 22:23:27] [Rank 0] step:2341/10000 train_time:120342ms step_avg:51.41ms
+[2025-09-05 22:23:28] [Rank 0] step:2361/10000 train_time:121080ms step_avg:51.28ms
+[2025-09-05 22:23:29] [Rank 0] step:2381/10000 train_time:121819ms step_avg:51.16ms
+[2025-09-05 22:23:30] [Rank 0] step:2401/10000 train_time:122557ms step_avg:51.04ms
+[2025-09-05 22:23:30] [Rank 0] step:2421/10000 train_time:123296ms step_avg:50.93ms
+[2025-09-05 22:23:31] [Rank 0] step:2441/10000 train_time:124033ms step_avg:50.81ms
+[2025-09-05 22:23:32] [Rank 0] step:2461/10000 train_time:124771ms step_avg:50.70ms
+[2025-09-05 22:23:33] [Rank 0] step:2481/10000 train_time:125509ms step_avg:50.59ms
+[2025-09-05 22:23:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:23:34] [Rank 0] PRINT: step:2500/10000 train_loss:2.6571 val_loss:2.5687 train_time:126329ms step_avg:50.53ms
+[2025-09-05 22:23:34] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:23:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:24:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:24:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:24:55] [Rank 0] Total Loss: 5.1704
+[2025-09-05 22:24:55] [Rank 0] Total FTA (Unweighted): 0.2106
+[2025-09-05 22:24:55] [Rank 0] Total FTA (Weighted): 0.2106
+[2025-09-05 22:24:55] [Rank 0] Group 0 Loss: 3.5252
+[2025-09-05 22:24:55] [Rank 0] Group 1 Loss: 3.4704
+[2025-09-05 22:24:55] [Rank 0] Group 2 Loss: 3.6252
+[2025-09-05 22:24:55] [Rank 0] Group 3 Loss: 4.0996
+[2025-09-05 22:24:55] [Rank 0] Group 4 Loss: 4.5882
+[2025-09-05 22:24:55] [Rank 0] Group 5 Loss: 5.0925
+[2025-09-05 22:24:55] [Rank 0] Group 6 Loss: 5.3776
+[2025-09-05 22:24:55] [Rank 0] Group 7 Loss: 5.5315
+[2025-09-05 22:24:55] [Rank 0] Group 8 Loss: 5.8169
+[2025-09-05 22:24:55] [Rank 0] Group 9 Loss: 5.9360
+[2025-09-05 22:24:55] [Rank 0] Group 10 Loss: 6.0013
+[2025-09-05 22:24:55] [Rank 0] Group 11 Loss: 6.0415
+[2025-09-05 22:24:55] [Rank 0] Group 12 Loss: 5.8792
+[2025-09-05 22:24:55] [Rank 0] Group 13 Loss: 5.8932
+[2025-09-05 22:24:55] [Rank 0] Group 14 Loss: 5.9505
+[2025-09-05 22:24:55] [Rank 0] Group 15 Loss: 5.8979
+[2025-09-05 22:24:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:24:55] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-05 22:24:55] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 22:24:55] [Rank 0] Group 3 FTA: 0.1700
0.1700 +[2025-09-05 22:24:55] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:24:55] [Rank 0] Group 4 FTA: 0.1500 +[2025-09-05 22:24:55] [Rank 0] Group 4 FTA: 0.1500 +[2025-09-05 22:24:55] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 22:24:55] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 22:24:56] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-05 22:24:56] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-05 22:24:56] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 22:24:56] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 22:24:56] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-05 22:24:56] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-05 22:24:56] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 22:24:56] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 22:24:56] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-05 22:24:56] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-05 22:24:56] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-05 22:24:56] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-05 22:24:56] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 22:24:56] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 22:24:56] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 22:24:56] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 22:24:56] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 22:24:56] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 22:24:56] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 22:24:56] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 22:24:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png +[2025-09-05 22:24:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png +[2025-09-05 22:24:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png +[2025-09-05 22:24:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png +[2025-09-05 22:24:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png +[2025-09-05 22:24:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png +[2025-09-05 22:24:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png +[2025-09-05 22:24:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png +[2025-09-05 22:24:57] [Rank 0] step:2501/10000 train_time:126337ms step_avg:50.51ms +[2025-09-05 22:24:57] [Rank 0] step:2501/10000 train_time:126337ms step_avg:50.51ms +[2025-09-05 22:24:58] [Rank 0] step:2521/10000 train_time:127018ms step_avg:50.38ms +[2025-09-05 22:24:58] [Rank 0] step:2521/10000 train_time:127018ms step_avg:50.38ms +[2025-09-05 22:24:58] [Rank 0] step:2541/10000 train_time:127756ms step_avg:50.28ms +[2025-09-05 22:24:58] [Rank 0] step:2541/10000 train_time:127756ms step_avg:50.28ms +[2025-09-05 22:24:59] [Rank 0] step:2561/10000 train_time:128495ms step_avg:50.17ms +[2025-09-05 22:24:59] [Rank 0] step:2561/10000 train_time:128495ms step_avg:50.17ms +[2025-09-05 22:25:00] [Rank 0] step:2581/10000 train_time:129234ms step_avg:50.07ms +[2025-09-05 22:25:00] [Rank 0] step:2581/10000 train_time:129234ms step_avg:50.07ms +[2025-09-05 
22:25:01] [Rank 0] step:2601/10000 train_time:129973ms step_avg:49.97ms +[2025-09-05 22:25:01] [Rank 0] step:2601/10000 train_time:129973ms step_avg:49.97ms +[2025-09-05 22:25:01] [Rank 0] step:2621/10000 train_time:130712ms step_avg:49.87ms +[2025-09-05 22:25:01] [Rank 0] step:2621/10000 train_time:130712ms step_avg:49.87ms +[2025-09-05 22:25:02] [Rank 0] step:2641/10000 train_time:131452ms step_avg:49.77ms +[2025-09-05 22:25:02] [Rank 0] step:2641/10000 train_time:131452ms step_avg:49.77ms +[2025-09-05 22:25:03] [Rank 0] step:2661/10000 train_time:132191ms step_avg:49.68ms +[2025-09-05 22:25:03] [Rank 0] step:2661/10000 train_time:132191ms step_avg:49.68ms +[2025-09-05 22:25:04] [Rank 0] step:2681/10000 train_time:133064ms step_avg:49.63ms +[2025-09-05 22:25:04] [Rank 0] step:2681/10000 train_time:133064ms step_avg:49.63ms +[2025-09-05 22:25:05] [Rank 0] step:2701/10000 train_time:133802ms step_avg:49.54ms +[2025-09-05 22:25:05] [Rank 0] step:2701/10000 train_time:133802ms step_avg:49.54ms +[2025-09-05 22:25:05] [Rank 0] step:2721/10000 train_time:134540ms step_avg:49.45ms +[2025-09-05 22:25:05] [Rank 0] step:2721/10000 train_time:134540ms step_avg:49.45ms +[2025-09-05 22:25:06] [Rank 0] step:2741/10000 train_time:135422ms step_avg:49.41ms +[2025-09-05 22:25:06] [Rank 0] step:2741/10000 train_time:135422ms step_avg:49.41ms +[2025-09-05 22:25:07] [Rank 0] step:2761/10000 train_time:136161ms step_avg:49.32ms +[2025-09-05 22:25:07] [Rank 0] step:2761/10000 train_time:136161ms step_avg:49.32ms +[2025-09-05 22:25:08] [Rank 0] step:2781/10000 train_time:136899ms step_avg:49.23ms +[2025-09-05 22:25:08] [Rank 0] step:2781/10000 train_time:136899ms step_avg:49.23ms +[2025-09-05 22:25:08] [Rank 0] step:2801/10000 train_time:137638ms step_avg:49.14ms +[2025-09-05 22:25:08] [Rank 0] step:2801/10000 train_time:137638ms step_avg:49.14ms +[2025-09-05 22:25:10] [Rank 0] step:2821/10000 train_time:139005ms step_avg:49.28ms +[2025-09-05 22:25:10] [Rank 0] step:2821/10000 train_time:139005ms step_avg:49.28ms +[2025-09-05 22:25:10] [Rank 0] step:2841/10000 train_time:139742ms step_avg:49.19ms +[2025-09-05 22:25:10] [Rank 0] step:2841/10000 train_time:139742ms step_avg:49.19ms +[2025-09-05 22:25:11] [Rank 0] step:2861/10000 train_time:140481ms step_avg:49.10ms +[2025-09-05 22:25:11] [Rank 0] step:2861/10000 train_time:140481ms step_avg:49.10ms +[2025-09-05 22:25:12] [Rank 0] step:2881/10000 train_time:141220ms step_avg:49.02ms +[2025-09-05 22:25:12] [Rank 0] step:2881/10000 train_time:141220ms step_avg:49.02ms +[2025-09-05 22:25:13] [Rank 0] step:2901/10000 train_time:141959ms step_avg:48.93ms +[2025-09-05 22:25:13] [Rank 0] step:2901/10000 train_time:141959ms step_avg:48.93ms +[2025-09-05 22:25:13] [Rank 0] step:2921/10000 train_time:142697ms step_avg:48.85ms +[2025-09-05 22:25:13] [Rank 0] step:2921/10000 train_time:142697ms step_avg:48.85ms +[2025-09-05 22:25:14] [Rank 0] step:2941/10000 train_time:143436ms step_avg:48.77ms +[2025-09-05 22:25:14] [Rank 0] step:2941/10000 train_time:143436ms step_avg:48.77ms +[2025-09-05 22:25:15] [Rank 0] step:2961/10000 train_time:144174ms step_avg:48.69ms +[2025-09-05 22:25:15] [Rank 0] step:2961/10000 train_time:144174ms step_avg:48.69ms +[2025-09-05 22:25:16] [Rank 0] step:2981/10000 train_time:144912ms step_avg:48.61ms +[2025-09-05 22:25:16] [Rank 0] step:2981/10000 train_time:144912ms step_avg:48.61ms +[2025-09-05 22:25:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 22:25:17] [Rank 0] PRINT: step:3000/10000 train_loss:2.5129 val_loss:2.4465 train_time:145731ms step_avg:48.58ms
+[2025-09-05 22:25:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:25:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:26:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:26:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:26:38] [Rank 0] Total Loss: 4.9729
+[2025-09-05 22:26:38] [Rank 0] Total FTA (Unweighted): 0.2606
+[2025-09-05 22:26:38] [Rank 0] Total FTA (Weighted): 0.2606
+[2025-09-05 22:26:39] [Rank 0] Group 0 Loss: 3.4660
+[2025-09-05 22:26:39] [Rank 0] Group 1 Loss: 3.3851
+[2025-09-05 22:26:39] [Rank 0] Group 2 Loss: 3.4571
+[2025-09-05 22:26:39] [Rank 0] Group 3 Loss: 3.8828
+[2025-09-05 22:26:39] [Rank 0] Group 4 Loss: 4.3446
+[2025-09-05 22:26:39] [Rank 0] Group 5 Loss: 4.8484
+[2025-09-05 22:26:39] [Rank 0] Group 6 Loss: 5.1673
+[2025-09-05 22:26:39] [Rank 0] Group 7 Loss: 5.3017
+[2025-09-05 22:26:39] [Rank 0] Group 8 Loss: 5.5876
+[2025-09-05 22:26:39] [Rank 0] Group 9 Loss: 5.7011
+[2025-09-05 22:26:39] [Rank 0] Group 10 Loss: 5.7620
+[2025-09-05 22:26:39] [Rank 0] Group 11 Loss: 5.8134
+[2025-09-05 22:26:39] [Rank 0] Group 12 Loss: 5.6655
+[2025-09-05 22:26:39] [Rank 0] Group 13 Loss: 5.7172
+[2025-09-05 22:26:39] [Rank 0] Group 14 Loss: 5.7600
+[2025-09-05 22:26:39] [Rank 0] Group 15 Loss: 5.7069
+[2025-09-05 22:26:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:26:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:26:39] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 22:26:39] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:26:39] [Rank 0] Group 4 FTA: 0.1700
+[2025-09-05 22:26:39] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 22:26:39] [Rank 0] Group 6 FTA: 0.1400
+[2025-09-05 22:26:39] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-05 22:26:39] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 22:26:39] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 22:26:39] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-05 22:26:39] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-05 22:26:39] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 22:26:39] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 22:26:39] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 22:26:39] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 22:26:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:26:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:26:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:26:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:26:40] [Rank 0] step:3001/10000 train_time:145740ms step_avg:48.56ms
+[2025-09-05 22:26:41] [Rank 0] step:3021/10000 train_time:146422ms step_avg:48.47ms
+[2025-09-05 22:26:42] [Rank 0] step:3041/10000 train_time:147161ms step_avg:48.39ms
+[2025-09-05 22:26:42] [Rank 0] step:3061/10000 train_time:147899ms step_avg:48.32ms
+[2025-09-05 22:26:43] [Rank 0] step:3081/10000 train_time:148638ms step_avg:48.24ms
+[2025-09-05 22:26:44] [Rank 0] step:3101/10000 train_time:149377ms step_avg:48.17ms
+[2025-09-05 22:26:45] [Rank 0] step:3121/10000 train_time:150115ms step_avg:48.10ms
+[2025-09-05 22:26:45] [Rank 0] step:3141/10000 train_time:150854ms step_avg:48.03ms
+[2025-09-05 22:26:46] [Rank 0] step:3161/10000 train_time:151594ms step_avg:47.96ms
+[2025-09-05 22:26:47] [Rank 0] step:3181/10000 train_time:152332ms step_avg:47.89ms
+[2025-09-05 22:26:47] [Rank 0] step:3201/10000 train_time:153070ms step_avg:47.82ms
+[2025-09-05 22:26:48] [Rank 0] step:3221/10000 train_time:153810ms step_avg:47.75ms
+[2025-09-05 22:26:49] [Rank 0] step:3241/10000 train_time:154548ms step_avg:47.69ms
+[2025-09-05 22:26:50] [Rank 0] step:3261/10000 train_time:155287ms step_avg:47.62ms
+[2025-09-05 22:26:50] [Rank 0] step:3281/10000 train_time:156025ms step_avg:47.55ms
+[2025-09-05 22:26:51] [Rank 0] step:3301/10000 train_time:156765ms step_avg:47.49ms
+[2025-09-05 22:26:52] [Rank 0] step:3321/10000 train_time:157504ms step_avg:47.43ms
+[2025-09-05 22:26:53] [Rank 0] step:3341/10000 train_time:158241ms step_avg:47.36ms
+[2025-09-05 22:26:53] [Rank 0] step:3361/10000 train_time:158979ms step_avg:47.30ms
+[2025-09-05 22:26:54] [Rank 0] step:3381/10000 train_time:159718ms step_avg:47.24ms
+[2025-09-05 22:26:55] [Rank 0] step:3401/10000 train_time:160456ms step_avg:47.18ms
+[2025-09-05 22:26:56] [Rank 0] step:3421/10000 train_time:161194ms step_avg:47.12ms
+[2025-09-05 22:26:56] [Rank 0] step:3441/10000 train_time:161932ms step_avg:47.06ms
+[2025-09-05 22:26:57] [Rank 0] step:3461/10000 train_time:162671ms step_avg:47.00ms
+[2025-09-05 22:26:58] [Rank 0] step:3481/10000 train_time:163409ms step_avg:46.94ms
+[2025-09-05 22:26:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:26:59] [Rank 0] PRINT: step:3500/10000 train_loss:2.4090 val_loss:2.3598 train_time:164227ms step_avg:46.92ms
+[2025-09-05 22:26:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:26:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:28:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:28:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:28:20] [Rank 0] Total Loss: 4.9257
+[2025-09-05 22:28:20] [Rank 0] Total FTA (Unweighted): 0.2700
+[2025-09-05 22:28:20] [Rank 0] Total FTA (Weighted): 0.2700
+[2025-09-05 22:28:20] [Rank 0] Group 0 Loss: 3.4203
+[2025-09-05 22:28:20] [Rank 0] Group 1 Loss: 3.4104
+[2025-09-05 22:28:20] [Rank 0] Group 2 Loss: 3.4775
+[2025-09-05 22:28:20] [Rank 0] Group 3 Loss: 3.8501
+[2025-09-05 22:28:20] [Rank 0] Group 4 Loss: 4.3134
+[2025-09-05 22:28:20] [Rank 0] Group 5 Loss: 4.7870
+[2025-09-05 22:28:20] [Rank 0] Group 6 Loss: 5.0857
+[2025-09-05 22:28:20] [Rank 0] Group 7 Loss: 5.2337
+[2025-09-05 22:28:20] [Rank 0] Group 8 Loss: 5.5187
+[2025-09-05 22:28:20] [Rank 0] Group 9 Loss: 5.6352
+[2025-09-05 22:28:20] [Rank 0] Group 10 Loss: 5.7166
+[2025-09-05 22:28:20] [Rank 0] Group 11 Loss: 5.7402
+[2025-09-05 22:28:20] [Rank 0] Group 12 Loss: 5.6146
+[2025-09-05 22:28:21] [Rank 0] Group 13 Loss: 5.6500
+[2025-09-05 22:28:21] [Rank 0] Group 14 Loss: 5.7131
+[2025-09-05 22:28:21] [Rank 0] Group 15 Loss: 5.6443
+[2025-09-05 22:28:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:28:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:28:21] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 22:28:21] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:28:21] [Rank 0] Group 4 FTA: 0.2000
+[2025-09-05 22:28:21] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-05 22:28:21] [Rank 0] Group 6 FTA: 0.2100
+[2025-09-05 22:28:21] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 22:28:21] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 22:28:21] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 22:28:21] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-05 22:28:21] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 22:28:21] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 22:28:21] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 22:28:21] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 22:28:21] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 22:28:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:28:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:28:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:28:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:28:22] [Rank 0] step:3501/10000 train_time:164236ms step_avg:46.91ms
+[2025-09-05 22:28:23] [Rank 0] step:3521/10000 train_time:164916ms step_avg:46.84ms
+[2025-09-05 22:28:24] [Rank 0] step:3541/10000 train_time:165656ms step_avg:46.78ms
+[2025-09-05 22:28:24] [Rank 0] step:3561/10000 train_time:166396ms step_avg:46.73ms
+[2025-09-05 22:28:25] [Rank 0] step:3581/10000 train_time:167135ms step_avg:46.67ms
+[2025-09-05 22:28:26] [Rank 0] step:3601/10000 train_time:167874ms step_avg:46.62ms
+[2025-09-05 22:28:26] [Rank 0] step:3621/10000 train_time:168613ms step_avg:46.57ms
+[2025-09-05 22:28:27] [Rank 0] step:3641/10000 train_time:169548ms step_avg:46.57ms
+[2025-09-05 22:28:28] [Rank 0] step:3661/10000 train_time:170286ms step_avg:46.51ms
+[2025-09-05 22:28:29] [Rank 0] step:3681/10000 train_time:171026ms step_avg:46.46ms
+[2025-09-05 22:28:30] [Rank 0] step:3701/10000 train_time:171765ms step_avg:46.41ms
+[2025-09-05 22:28:30] [Rank 0] step:3721/10000 train_time:172504ms step_avg:46.36ms
+[2025-09-05 22:28:31] [Rank 0] step:3741/10000 train_time:173242ms step_avg:46.31ms
+[2025-09-05 22:28:32] [Rank 0] step:3761/10000 train_time:173980ms step_avg:46.26ms
+[2025-09-05 22:28:33] [Rank 0] step:3781/10000 train_time:174719ms step_avg:46.21ms
+[2025-09-05 22:28:33] [Rank 0] step:3801/10000 train_time:175458ms step_avg:46.16ms
+[2025-09-05 22:28:34] [Rank 0] step:3821/10000 train_time:176197ms step_avg:46.11ms
+[2025-09-05 22:28:35] [Rank 0] step:3841/10000 train_time:176936ms step_avg:46.06ms
+[2025-09-05 22:28:36] [Rank 0] step:3861/10000 train_time:177674ms step_avg:46.02ms
+[2025-09-05 22:28:36] [Rank 0] step:3881/10000 train_time:178413ms step_avg:45.97ms
+[2025-09-05 22:28:37] [Rank 0] step:3901/10000 train_time:179153ms step_avg:45.92ms
+[2025-09-05 22:28:38] [Rank 0] step:3921/10000 train_time:179891ms step_avg:45.88ms
+[2025-09-05 22:28:38] [Rank 0] step:3941/10000 train_time:180630ms step_avg:45.83ms
+[2025-09-05 22:28:39] [Rank 0] step:3961/10000 train_time:181368ms step_avg:45.79ms
+[2025-09-05 22:28:40] [Rank 0] step:3981/10000 train_time:182107ms step_avg:45.74ms
+[2025-09-05 22:28:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:28:41] [Rank 0] PRINT: step:4000/10000 train_loss:2.3417 val_loss:2.2944 train_time:182926ms step_avg:45.73ms
+[2025-09-05 22:28:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:28:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:30:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:30:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:30:03] [Rank 0] Total Loss: 4.9048
+[2025-09-05 22:30:03] [Rank 0] Total FTA (Unweighted): 0.2825
+[2025-09-05 22:30:03] [Rank 0] Total FTA (Weighted): 0.2825
+[2025-09-05 22:30:03] [Rank 0] Group 0 Loss: 3.4622
+[2025-09-05 22:30:03] [Rank 0] Group 1 Loss: 3.4477
+[2025-09-05 22:30:03] [Rank 0] Group 2 Loss: 3.4759
+[2025-09-05 22:30:03] [Rank 0] Group 3 Loss: 3.8656
+[2025-09-05 22:30:03] [Rank 0] Group 4 Loss: 4.2502
+[2025-09-05 22:30:03] [Rank 0] Group 5 Loss: 4.7201
+[2025-09-05 22:30:03] [Rank 0] Group 6 Loss: 5.0377
+[2025-09-05 22:30:03] [Rank 0] Group 7 Loss: 5.1705
+[2025-09-05 22:30:03] [Rank 0] Group 8 Loss: 5.4823
+[2025-09-05 22:30:03] [Rank 0] Group 9 Loss: 5.6127
+[2025-09-05 22:30:03] [Rank 0] Group 10 Loss: 5.7016
+[2025-09-05 22:30:03] [Rank 0] Group 11 Loss: 5.7205
+[2025-09-05 22:30:03] [Rank 0] Group 12 Loss: 5.5984
+[2025-09-05 22:30:03] [Rank 0] Group 13 Loss: 5.6233
+[2025-09-05 22:30:03] [Rank 0] Group 14 Loss: 5.6658
+[2025-09-05 22:30:03] [Rank 0] Group 15 Loss: 5.6417
+[2025-09-05 22:30:03] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:30:03] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:30:03] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 22:30:03] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:30:03] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:30:03] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 22:30:03] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 22:30:03] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 22:30:03] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 22:30:03] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 22:30:03] [Rank 0] Group 10 FTA: 0.1700
+[2025-09-05 22:30:03] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 22:30:03] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 22:30:03] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 22:30:03] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 22:30:03] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 22:30:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:30:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:30:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:30:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:30:04] [Rank 0] step:4001/10000 train_time:182935ms step_avg:45.72ms
+[2025-09-05 22:30:06] [Rank 0] step:4021/10000 train_time:184211ms step_avg:45.81ms
+[2025-09-05 22:30:07] [Rank 0] step:4041/10000 train_time:184952ms step_avg:45.77ms
+[2025-09-05 22:30:07] [Rank 0] step:4061/10000 train_time:185689ms step_avg:45.73ms
+[2025-09-05 22:30:08] [Rank 0] step:4081/10000 train_time:186428ms step_avg:45.68ms
+[2025-09-05 22:30:09] [Rank 0] step:4101/10000 train_time:187166ms step_avg:45.64ms
+[2025-09-05 22:30:10] [Rank 0] step:4121/10000 train_time:187905ms step_avg:45.60ms
+[2025-09-05 22:30:10] [Rank 0] step:4141/10000 train_time:188644ms step_avg:45.56ms
+[2025-09-05 22:30:11] [Rank 0] step:4161/10000 train_time:189382ms step_avg:45.51ms
+[2025-09-05 22:30:12] [Rank 0] step:4181/10000 train_time:190119ms step_avg:45.47ms
+[2025-09-05 22:30:12] [Rank 0] step:4201/10000 train_time:190858ms step_avg:45.43ms
+[2025-09-05 22:30:13] [Rank 0] step:4221/10000 train_time:191598ms step_avg:45.39ms
+[2025-09-05 22:30:14] [Rank 0] step:4241/10000 train_time:192337ms step_avg:45.35ms
+[2025-09-05 22:30:15] [Rank 0] step:4261/10000 train_time:193077ms step_avg:45.31ms
+[2025-09-05 22:30:15] [Rank 0] step:4281/10000 train_time:193815ms step_avg:45.27ms
+[2025-09-05 22:30:16] [Rank 0] step:4301/10000 train_time:194553ms step_avg:45.23ms
+[2025-09-05 22:30:17] [Rank 0] step:4321/10000 train_time:195292ms step_avg:45.20ms
+[2025-09-05 22:30:18] [Rank 0] step:4341/10000 train_time:196030ms step_avg:45.16ms
+[2025-09-05 22:30:18] [Rank 0] step:4361/10000 train_time:196769ms step_avg:45.12ms
+[2025-09-05 22:30:19] [Rank 0] step:4381/10000 train_time:197508ms step_avg:45.08ms
+[2025-09-05 22:30:20] [Rank 0] step:4401/10000 train_time:198376ms step_avg:45.08ms
+[2025-09-05 22:30:21] [Rank 0] step:4421/10000 train_time:199115ms step_avg:45.04ms
+[2025-09-05 22:30:21] [Rank 0] step:4441/10000 train_time:199853ms step_avg:45.00ms
+[2025-09-05 22:30:22] [Rank 0] step:4461/10000 train_time:200731ms step_avg:45.00ms
+[2025-09-05 22:30:23] [Rank 0] step:4481/10000 train_time:201482ms step_avg:44.96ms
+[2025-09-05 22:30:24] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:30:24] [Rank 0] PRINT: step:4500/10000 train_loss:2.2778 val_loss:2.2445 train_time:202301ms step_avg:44.96ms
+[2025-09-05 22:30:24] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:30:24] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:31:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:31:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:31:46] [Rank 0] Total Loss: 4.8528
+[2025-09-05 22:31:46] [Rank 0] Total FTA (Unweighted): 0.2875
+[2025-09-05 22:31:46] [Rank 0] Total FTA (Weighted): 0.2875
+[2025-09-05 22:31:46] [Rank 0] Group 0 Loss: 3.5033
+[2025-09-05 22:31:46] [Rank 0] Group 1 Loss: 3.4434
+[2025-09-05 22:31:46] [Rank 0] Group 2 Loss: 3.4331
+[2025-09-05 22:31:46] [Rank 0] Group 3 Loss: 3.8323
+[2025-09-05 22:31:46] [Rank 0] Group 4 Loss: 4.2017
+[2025-09-05 22:31:46] [Rank 0] Group 5 Loss: 4.6679
+[2025-09-05 22:31:46] [Rank 0] Group 6 Loss: 4.9576
+[2025-09-05 22:31:46] [Rank 0] Group 7 Loss: 5.1044
+[2025-09-05 22:31:46] [Rank 0] Group 8 Loss: 5.4180
+[2025-09-05 22:31:46] [Rank 0] Group 9 Loss: 5.5453
+[2025-09-05 22:31:46] [Rank 0] Group 10 Loss: 5.6280
+[2025-09-05 22:31:46] [Rank 0] Group 11 Loss: 5.6415
+[2025-09-05 22:31:46] [Rank 0] Group 12 Loss: 5.5288
+[2025-09-05 22:31:46] [Rank 0] Group 13 Loss: 5.5593
+[2025-09-05 22:31:46] [Rank 0] Group 14 Loss: 5.6112
+[2025-09-05 22:31:46] [Rank 0] Group 15 Loss: 5.5691
+[2025-09-05 22:31:46] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:31:46] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:31:46] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 22:31:46] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:31:46] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:31:46] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 22:31:46] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 22:31:46] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 22:31:46] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 22:31:46] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 22:31:46] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 22:31:46] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 22:31:46] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 22:31:46] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 22:31:46] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 22:31:46] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 22:31:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:31:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:31:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:31:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:31:47] [Rank 0] step:4501/10000 train_time:202310ms step_avg:44.95ms
+[2025-09-05 22:31:48] [Rank 0] step:4521/10000 train_time:202996ms step_avg:44.90ms
+[2025-09-05 22:31:49] [Rank 0] step:4541/10000 train_time:203735ms step_avg:44.87ms
+[2025-09-05 22:31:49] [Rank 0] step:4561/10000 train_time:204473ms step_avg:44.83ms
+[2025-09-05 22:31:50] [Rank 0] step:4581/10000 train_time:205213ms step_avg:44.80ms
+[2025-09-05 22:31:51] [Rank 0] step:4601/10000 train_time:205951ms step_avg:44.76ms
+[2025-09-05 22:31:52] [Rank 0] step:4621/10000 train_time:206690ms step_avg:44.73ms
+[2025-09-05 22:31:52] [Rank 0] step:4641/10000 train_time:207428ms step_avg:44.69ms
+[2025-09-05 22:31:53] [Rank 0] step:4661/10000 train_time:208165ms step_avg:44.66ms
+[2025-09-05 22:31:54] [Rank 0] step:4681/10000 train_time:208903ms step_avg:44.63ms
+[2025-09-05 22:31:55] [Rank 0] step:4701/10000 train_time:209640ms step_avg:44.59ms
+[2025-09-05 22:31:55] [Rank 0] step:4721/10000 train_time:210378ms step_avg:44.56ms
+[2025-09-05 22:31:56] [Rank 0] step:4741/10000 train_time:211117ms step_avg:44.53ms
+[2025-09-05 22:31:57] [Rank 0] step:4761/10000 train_time:211855ms step_avg:44.50ms
+[2025-09-05 22:31:58] [Rank 0] step:4781/10000 train_time:212592ms step_avg:44.47ms
+[2025-09-05 22:31:58] [Rank 0] step:4801/10000 train_time:213329ms step_avg:44.43ms
+[2025-09-05 22:31:59] [Rank 0] step:4821/10000 train_time:214066ms step_avg:44.40ms
+[2025-09-05 22:32:00] [Rank 0] step:4841/10000 train_time:215112ms step_avg:44.44ms
+[2025-09-05 22:32:01] [Rank 0] step:4861/10000 train_time:215851ms step_avg:44.40ms
+[2025-09-05 22:32:02] [Rank 0] step:4881/10000 train_time:216589ms step_avg:44.37ms
+[2025-09-05 22:32:02] [Rank 0] step:4901/10000 train_time:217328ms step_avg:44.34ms
+[2025-09-05 22:32:03] [Rank 0] step:4921/10000 train_time:218067ms step_avg:44.31ms
+[2025-09-05 22:32:04] [Rank 0] step:4941/10000 train_time:218806ms step_avg:44.28ms
+[2025-09-05 22:32:04] [Rank 0] step:4961/10000 train_time:219545ms step_avg:44.25ms
+[2025-09-05 22:32:05] [Rank 0] step:4981/10000 train_time:220283ms step_avg:44.22ms
+[2025-09-05 22:32:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:32:06] [Rank 0] PRINT: step:5000/10000 train_loss:2.2299 val_loss:2.2041 train_time:221103ms step_avg:44.22ms
+[2025-09-05 22:32:06] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:32:07] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:33:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:33:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:33:28] [Rank 0] Total Loss: 4.8409
+[2025-09-05 22:33:28] [Rank 0] Total FTA (Unweighted): 0.2938
+[2025-09-05 22:33:28] [Rank 0] Total FTA (Weighted): 0.2938
+[2025-09-05 22:33:28] [Rank 0] Group 0 Loss: 3.4842
+[2025-09-05 22:33:28] [Rank 0] Group 1 Loss: 3.4508
+[2025-09-05 22:33:28] [Rank 0] Group 2 Loss: 3.4598
+[2025-09-05 22:33:28] [Rank 0] Group 3 Loss: 3.8716
+[2025-09-05 22:33:28] [Rank 0] Group 4 Loss: 4.1995
+[2025-09-05 22:33:28] [Rank 0] Group 5 Loss: 4.6428
+[2025-09-05 22:33:28] [Rank 0] Group 6 Loss: 4.9669
+[2025-09-05 22:33:28] [Rank 0] Group 7 Loss: 5.0880
+[2025-09-05 22:33:28] [Rank 0] Group 8 Loss: 5.4000
+[2025-09-05 22:33:28] [Rank 0] Group 9 Loss: 5.5001
+[2025-09-05 22:33:28] [Rank 0] Group 10 Loss: 5.6149
+[2025-09-05 22:33:28] [Rank 0] Group 11 Loss: 5.5967
+[2025-09-05 22:33:28] [Rank 0] Group 12 Loss: 5.5157
+[2025-09-05 22:33:28] [Rank 0] Group 13 Loss: 5.5331
+[2025-09-05 22:33:28] [Rank 0] Group 14 Loss: 5.5754
+[2025-09-05 22:33:28] [Rank 0] Group 15 Loss: 5.5550
+[2025-09-05 22:33:28] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:33:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:33:28] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 22:33:28] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:33:28] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:33:28] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 22:33:28] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 22:33:28] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 22:33:28] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 22:33:28] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 22:33:28] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 22:33:28] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 22:33:28] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 22:33:28] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 22:33:28] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 22:33:28] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 22:33:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:33:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:33:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:33:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:33:30] [Rank 0] step:5001/10000 train_time:221112ms step_avg:44.21ms
+[2025-09-05 22:33:31] [Rank 0] step:5021/10000 train_time:221788ms step_avg:44.17ms
+[2025-09-05 22:33:31] [Rank 0] step:5041/10000 train_time:222527ms step_avg:44.14ms
+[2025-09-05 22:33:32] [Rank 0] step:5061/10000 train_time:223410ms step_avg:44.14ms
+[2025-09-05 22:33:33] [Rank 0] step:5081/10000 train_time:224148ms step_avg:44.12ms
+[2025-09-05 22:33:34] [Rank 0] step:5101/10000 train_time:224887ms step_avg:44.09ms
+[2025-09-05 22:33:34] [Rank 0] step:5121/10000 train_time:225627ms step_avg:44.06ms
+[2025-09-05 22:33:35] [Rank 0] step:5141/10000 train_time:226365ms step_avg:44.03ms
+[2025-09-05 22:33:36] [Rank 0] step:5161/10000 train_time:227104ms step_avg:44.00ms
+[2025-09-05 22:33:37] [Rank 0] step:5181/10000 train_time:227843ms step_avg:43.98ms
+[2025-09-05 22:33:37] [Rank 0] step:5201/10000 train_time:228582ms step_avg:43.95ms
+[2025-09-05 22:33:38] [Rank 0] step:5221/10000 train_time:229320ms step_avg:43.92ms
+[2025-09-05 22:33:39] [Rank 0] step:5241/10000 train_time:230060ms step_avg:43.90ms
+[2025-09-05 22:33:40] [Rank 0] step:5261/10000 train_time:230799ms step_avg:43.87ms
+[2025-09-05 22:33:40] [Rank 0] step:5281/10000 train_time:231537ms step_avg:43.84ms
+[2025-09-05 22:33:41] [Rank 0] step:5301/10000 train_time:232277ms step_avg:43.82ms
+[2025-09-05 22:33:42] [Rank 0] step:5321/10000 train_time:233015ms step_avg:43.79ms
+[2025-09-05 22:33:42] [Rank 0] step:5341/10000 train_time:233753ms step_avg:43.77ms
+[2025-09-05 22:33:43] [Rank 0] step:5361/10000 train_time:234494ms step_avg:43.74ms
+[2025-09-05 22:33:44] [Rank 0] step:5381/10000 train_time:235233ms step_avg:43.72ms
+[2025-09-05 22:33:45] [Rank 0] step:5401/10000 train_time:235971ms step_avg:43.69ms
+[2025-09-05 22:33:45] [Rank 0] step:5421/10000 train_time:236708ms step_avg:43.66ms
+[2025-09-05 22:33:46] [Rank 0] step:5441/10000 train_time:237447ms step_avg:43.64ms
+[2025-09-05 22:33:47] [Rank 0] step:5461/10000 train_time:238186ms step_avg:43.62ms
+[2025-09-05 22:33:48] [Rank 0] step:5481/10000 train_time:238925ms step_avg:43.59ms
+[2025-09-05 22:33:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:33:49] [Rank 0] PRINT: step:5500/10000 train_loss:2.1915 val_loss:2.1663 train_time:239744ms step_avg:43.59ms
+[2025-09-05 22:33:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:33:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:35:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:35:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:35:10] [Rank 0] Total Loss: 4.8057
+[2025-09-05 22:35:10] [Rank 0] Total FTA (Unweighted): 0.2912
+[2025-09-05 22:35:10] [Rank 0] Total FTA (Weighted): 0.2913
+[2025-09-05 22:35:10] [Rank 0] Group 0 Loss: 3.4836
+[2025-09-05 22:35:10] [Rank 0] Group 1 Loss: 3.4330
+[2025-09-05 22:35:10] [Rank 0] Group 2 Loss: 3.4056
+[2025-09-05 22:35:10] [Rank 0] Group 3 Loss: 3.8301
+[2025-09-05 22:35:10] [Rank 0] Group 4 Loss: 4.1718
+[2025-09-05 22:35:10] [Rank 0] Group 5 Loss: 4.5998
+[2025-09-05 22:35:10] [Rank 0] Group 6 Loss: 4.8842
+[2025-09-05 22:35:10] [Rank 0] Group 7 Loss: 5.0385
+[2025-09-05 22:35:10] [Rank 0] Group 8 Loss: 5.3507
+[2025-09-05 22:35:10] [Rank 0] Group 9 Loss: 5.4818
+[2025-09-05 22:35:10] [Rank 0] Group 10 Loss: 5.5845
+[2025-09-05 22:35:10] [Rank 0] Group 11 Loss: 5.5759
+[2025-09-05 22:35:10] [Rank 0] Group 12 Loss: 5.4676
+[2025-09-05 22:35:10] [Rank 0] Group 13 Loss: 5.5065
+[2025-09-05 22:35:10] [Rank 0] Group 14 Loss: 5.5592
+[2025-09-05 22:35:10] [Rank 0] Group 15 Loss: 5.5185
+[2025-09-05 22:35:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:35:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:35:10] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 22:35:10] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:35:10] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:35:10] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 22:35:10] [Rank 0] Group 6 FTA: 0.2700
+[2025-09-05 22:35:10] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 22:35:10] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 22:35:10] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 22:35:10] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 22:35:10] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-05 22:35:10] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 22:35:10] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 22:35:10] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 22:35:10] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 22:35:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:35:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:35:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:35:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:35:12] [Rank 0] step:5501/10000 train_time:239754ms step_avg:43.58ms
+[2025-09-05 22:35:12] [Rank 0] step:5521/10000 train_time:240424ms step_avg:43.55ms
+[2025-09-05 22:35:13] [Rank 0] step:5541/10000 train_time:241163ms step_avg:43.52ms
+[2025-09-05 22:35:14] [Rank 0] step:5561/10000 train_time:241901ms step_avg:43.50ms
+[2025-09-05 22:35:15] [Rank 0] step:5581/10000 train_time:242640ms step_avg:43.48ms
+[2025-09-05 22:35:15] [Rank 0] step:5601/10000 train_time:243382ms step_avg:43.45ms
+[2025-09-05 22:35:16] [Rank 0] step:5621/10000 train_time:244121ms step_avg:43.43ms
+[2025-09-05 22:35:17] [Rank 0] step:5641/10000 train_time:245465ms step_avg:43.51ms
+[2025-09-05 22:35:18] [Rank 0] step:5661/10000 train_time:246202ms step_avg:43.49ms
+[2025-09-05 22:35:19] [Rank 0] step:5681/10000 train_time:246940ms step_avg:43.47ms
+[2025-09-05 22:35:20] [Rank 0] step:5701/10000 train_time:247678ms step_avg:43.44ms
+[2025-09-05 22:35:20] [Rank 0] step:5721/10000 train_time:248417ms step_avg:43.42ms
+[2025-09-05 22:35:21] [Rank 0] step:5741/10000 train_time:249155ms step_avg:43.40ms
+[2025-09-05 22:35:22] [Rank 0] step:5761/10000 train_time:249895ms step_avg:43.38ms
+[2025-09-05 22:35:23] [Rank 0] step:5781/10000 train_time:250634ms step_avg:43.35ms
+[2025-09-05 22:35:23] [Rank 0] step:5801/10000 train_time:251372ms step_avg:43.33ms
+[2025-09-05 22:35:24] [Rank 0] step:5821/10000 train_time:252111ms step_avg:43.31ms
+[2025-09-05 22:35:25] [Rank 0] step:5841/10000 train_time:252850ms step_avg:43.29ms
+[2025-09-05 22:35:26] [Rank 0] step:5861/10000 train_time:253589ms step_avg:43.27ms
+[2025-09-05 22:35:26] [Rank 0] step:5881/10000 train_time:254328ms step_avg:43.25ms
+[2025-09-05 22:35:27] [Rank 0] step:5901/10000 train_time:255067ms step_avg:43.22ms
+[2025-09-05 22:35:28] [Rank 0] step:5921/10000 train_time:255805ms step_avg:43.20ms
+[2025-09-05 22:35:29] [Rank 0] step:5941/10000 train_time:256543ms step_avg:43.18ms
+[2025-09-05 22:35:29] [Rank 0] step:5961/10000 train_time:257281ms step_avg:43.16ms
+[2025-09-05 22:35:30] [Rank 0] step:5981/10000 train_time:258019ms step_avg:43.14ms
+[2025-09-05 22:35:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
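Editor's note: the step_avg field in these heartbeat lines is consistent with cumulative train_time divided by the step index, e.g. 258019 ms / 5981 ≈ 43.14 ms, matching the line above. A one-line check, assuming that definition (it is not taken from the script itself):

# step_avg appears to be cumulative train_time over step count (assumed definition).
train_time_ms, step = 258019, 5981
print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:43.14ms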
+[2025-09-05 22:35:31] [Rank 0] PRINT: step:6000/10000 train_loss:2.1607 val_loss:2.1387 train_time:258839ms step_avg:43.14ms
+[2025-09-05 22:35:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:35:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:36:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:36:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:36:53] [Rank 0] Total Loss: 4.8089
+[2025-09-05 22:36:53] [Rank 0] Total FTA (Unweighted): 0.3050
+[2025-09-05 22:36:53] [Rank 0] Total FTA (Weighted): 0.3050
+[2025-09-05 22:36:53] [Rank 0] Group 0 Loss: 3.4838
+[2025-09-05 22:36:53] [Rank 0] Group 1 Loss: 3.4345
+[2025-09-05 22:36:53] [Rank 0] Group 2 Loss: 3.4701
+[2025-09-05 22:36:53] [Rank 0] Group 3 Loss: 3.8802
+[2025-09-05 22:36:53] [Rank 0] Group 4 Loss: 4.1586
+[2025-09-05 22:36:53] [Rank 0] Group 5 Loss: 4.6041
+[2025-09-05 22:36:53] [Rank 0] Group 6 Loss: 4.8672
+[2025-09-05 22:36:53] [Rank 0] Group 7 Loss: 5.0413
+[2025-09-05 22:36:53] [Rank 0] Group 8 Loss: 5.3405
+[2025-09-05 22:36:53] [Rank 0] Group 9 Loss: 5.4759
+[2025-09-05 22:36:53] [Rank 0] Group 10 Loss: 5.5855
+[2025-09-05 22:36:53] [Rank 0] Group 11 Loss: 5.5840
+[2025-09-05 22:36:53] [Rank 0] Group 12 Loss: 5.4660
+[2025-09-05 22:36:53] [Rank 0] Group 13 Loss: 5.5030
+[2025-09-05 22:36:53] [Rank 0] Group 14 Loss: 5.5404
+[2025-09-05 22:36:53] [Rank 0] Group 15 Loss: 5.5069
+[2025-09-05 22:36:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:36:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:36:53] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-05 22:36:53] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:36:53] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:36:53] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 22:36:53] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:36:53] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 22:36:53] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 22:36:53] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 22:36:53] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 22:36:53] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 22:36:53] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 22:36:53] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 22:36:53] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 22:36:53] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 22:36:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:36:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:36:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:36:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:36:54] [Rank 0] step:6001/10000 train_time:258848ms step_avg:43.13ms
+[2025-09-05 22:36:56] [Rank 0] step:6021/10000 train_time:260135ms step_avg:43.20ms
+[2025-09-05 22:36:56] [Rank 0] step:6041/10000 train_time:260874ms step_avg:43.18ms
+[2025-09-05 22:36:57] [Rank 0] step:6061/10000 train_time:261613ms step_avg:43.16ms
+[2025-09-05 22:36:58] [Rank 0] step:6081/10000 train_time:262351ms step_avg:43.14ms
+[2025-09-05 22:36:59] [Rank 0] step:6101/10000 train_time:263089ms step_avg:43.12ms
+[2025-09-05 22:36:59] [Rank 0] step:6121/10000 train_time:263827ms step_avg:43.10ms
+[2025-09-05 22:37:00] [Rank 0] step:6141/10000 train_time:264566ms step_avg:43.08ms
+[2025-09-05 22:37:01] [Rank 0] step:6161/10000 train_time:265305ms step_avg:43.06ms
+[2025-09-05 22:37:02] [Rank 0] step:6181/10000 train_time:266044ms step_avg:43.04ms
+[2025-09-05 22:37:02] [Rank 0] step:6201/10000 train_time:266783ms step_avg:43.02ms
+[2025-09-05 22:37:03] [Rank 0] step:6221/10000 train_time:267522ms step_avg:43.00ms
+[2025-09-05 22:37:04] [Rank 0] step:6241/10000 train_time:268262ms step_avg:42.98ms
+[2025-09-05 22:37:04] [Rank 0] step:6261/10000 train_time:269001ms step_avg:42.96ms
+[2025-09-05 22:37:05] [Rank 0] step:6281/10000 train_time:269739ms step_avg:42.95ms
+[2025-09-05 22:37:06] [Rank 0] step:6301/10000 train_time:270478ms step_avg:42.93ms
+[2025-09-05 22:37:07] [Rank 0] step:6321/10000 train_time:271216ms step_avg:42.91ms
+[2025-09-05 22:37:07] [Rank 0] step:6341/10000 train_time:271956ms step_avg:42.89ms
+[2025-09-05 22:37:08] [Rank 0] step:6361/10000 train_time:272694ms step_avg:42.87ms
+[2025-09-05 22:37:09] [Rank 0] step:6381/10000 train_time:273432ms step_avg:42.85ms
+[2025-09-05 22:37:10] [Rank 0] step:6401/10000 train_time:274171ms step_avg:42.83ms
+[2025-09-05 22:37:10] [Rank 0] step:6421/10000 train_time:274909ms step_avg:42.81ms
+[2025-09-05 22:37:11] [Rank 0] step:6441/10000 train_time:275647ms step_avg:42.80ms
+[2025-09-05 22:37:12] [Rank 0] step:6461/10000 train_time:276386ms step_avg:42.78ms
+[2025-09-05 22:37:13] [Rank 0] step:6481/10000 train_time:277123ms step_avg:42.76ms
+[2025-09-05 22:37:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
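Editor's note: "Total FTA (Unweighted)" is consistent with a plain mean of the 16 per-group FTAs; the step-6000 block's group values average to exactly 0.3050. If the 1600 fixed-eval samples are split evenly, 100 per group, a sample-weighted mean gives the same number, which would explain why the two totals track so closely (the one-unit gap at step 5500, 0.2912 vs 0.2913, looks like display rounding of 0.29125). A check under that assumed even split (group values copied from the step-6000 block above):

# Assumed definitions; FTA values copied from the step-6000 eval block.
ftas = [1.00, 1.00, 0.39, 0.17, 0.25, 0.27, 0.29, 0.13,
        0.22, 0.14, 0.20, 0.19, 0.18, 0.23, 0.12, 0.10]
counts = [100] * 16                       # 1600 samples / 16 groups, assumed even
unweighted = sum(ftas) / len(ftas)
weighted = sum(f * c for f, c in zip(ftas, counts)) / sum(counts)
print(f"{unweighted:.4f} {weighted:.4f}")  # 0.3050 0.3050, matching the log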
+[2025-09-05 22:37:14] [Rank 0] PRINT: step:6500/10000 train_loss:2.1366 val_loss:2.1145 train_time:277942ms step_avg:42.76ms
+[2025-09-05 22:37:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:37:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:38:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:38:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:38:36] [Rank 0] Total Loss: 4.7498
+[2025-09-05 22:38:36] [Rank 0] Total FTA (Unweighted): 0.3144
+[2025-09-05 22:38:36] [Rank 0] Total FTA (Weighted): 0.3144
+[2025-09-05 22:38:36] [Rank 0] Group 0 Loss: 3.4874
+[2025-09-05 22:38:36] [Rank 0] Group 1 Loss: 3.4009
+[2025-09-05 22:38:36] [Rank 0] Group 2 Loss: 3.4386
+[2025-09-05 22:38:36] [Rank 0] Group 3 Loss: 3.7579
+[2025-09-05 22:38:36] [Rank 0] Group 4 Loss: 4.1059
+[2025-09-05 22:38:36] [Rank 0] Group 5 Loss: 4.5378
+[2025-09-05 22:38:36] [Rank 0] Group 6 Loss: 4.8243
+[2025-09-05 22:38:36] [Rank 0] Group 7 Loss: 4.9642
+[2025-09-05 22:38:36] [Rank 0] Group 8 Loss: 5.2911
+[2025-09-05 22:38:36] [Rank 0] Group 9 Loss: 5.3836
+[2025-09-05 22:38:36] [Rank 0] Group 10 Loss: 5.5141
+[2025-09-05 22:38:36] [Rank 0] Group 11 Loss: 5.5092
+[2025-09-05 22:38:36] [Rank 0] Group 12 Loss: 5.4073
+[2025-09-05 22:38:36] [Rank 0] Group 13 Loss: 5.4443
+[2025-09-05 22:38:36] [Rank 0] Group 14 Loss: 5.4696
+[2025-09-05 22:38:36] [Rank 0] Group 15 Loss: 5.4613
+[2025-09-05 22:38:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:38:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:38:36] [Rank 0] Group 2 FTA: 0.5000
+[2025-09-05 22:38:36] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:38:36] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:38:36] [Rank 0] Group 5 FTA: 0.2800
+[2025-09-05 22:38:36] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:38:36] [Rank 0] Group 7 FTA: 0.1400
+[2025-09-05 22:38:36] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 22:38:36] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 22:38:36] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 22:38:36] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 22:38:36] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 22:38:36] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 22:38:36] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 22:38:36] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 22:38:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:38:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:38:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:38:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:38:37] [Rank 0] step:6501/10000 train_time:277951ms step_avg:42.76ms
+[2025-09-05 22:38:38] [Rank 0] step:6521/10000 train_time:278636ms step_avg:42.73ms
+[2025-09-05 22:38:39] [Rank 0] step:6541/10000 train_time:279376ms step_avg:42.71ms
+[2025-09-05 22:38:40] [Rank 0] step:6561/10000 train_time:280115ms step_avg:42.69ms
+[2025-09-05 22:38:40] [Rank 0] step:6581/10000 train_time:280854ms step_avg:42.68ms
+[2025-09-05 22:38:41] [Rank 0] step:6601/10000 train_time:281593ms step_avg:42.66ms
+[2025-09-05 22:38:42] [Rank 0] step:6621/10000 train_time:282332ms step_avg:42.64ms
+[2025-09-05 22:38:43] [Rank 0] step:6641/10000 train_time:283072ms step_avg:42.62ms
+[2025-09-05 22:38:43] [Rank 0] step:6661/10000 train_time:283811ms step_avg:42.61ms
+[2025-09-05 22:38:44] [Rank 0] step:6681/10000 train_time:284550ms step_avg:42.59ms
+[2025-09-05 22:38:45] [Rank 0] step:6701/10000 train_time:285288ms step_avg:42.57ms
+[2025-09-05 22:38:46] [Rank 0] step:6721/10000 train_time:286160ms step_avg:42.58ms
+[2025-09-05 22:38:46] [Rank 0] step:6741/10000 train_time:286898ms step_avg:42.56ms
+[2025-09-05 22:38:47] [Rank 0] step:6761/10000 train_time:287636ms step_avg:42.54ms
+[2025-09-05 22:38:48] [Rank 0] step:6781/10000 train_time:288515ms step_avg:42.55ms
+[2025-09-05 22:38:49] [Rank 0] step:6801/10000 train_time:289252ms step_avg:42.53ms
+[2025-09-05 22:38:50] [Rank 0] step:6821/10000 train_time:289990ms step_avg:42.51ms
+[2025-09-05 22:38:51] [Rank 0] step:6841/10000 train_time:291340ms step_avg:42.59ms
+[2025-09-05 22:38:52] [Rank 0] step:6861/10000 train_time:292079ms step_avg:42.57ms
+[2025-09-05 22:38:52] [Rank 0] step:6881/10000 train_time:292824ms step_avg:42.56ms
+[2025-09-05 22:38:53] [Rank 0] step:6901/10000 train_time:293564ms step_avg:42.54ms
+[2025-09-05 22:38:54] [Rank 0] step:6921/10000 train_time:294303ms step_avg:42.52ms
+[2025-09-05 22:38:55] [Rank 0] step:6941/10000 train_time:295042ms step_avg:42.51ms
+[2025-09-05 22:38:55] [Rank 0] step:6961/10000 train_time:295781ms step_avg:42.49ms
+[2025-09-05 22:38:56] [Rank 0] step:6981/10000 train_time:296521ms step_avg:42.48ms
+[2025-09-05 22:38:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
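Editor's note: the heartbeat and summary lines follow a fixed format, so the loss curves can be recovered from this log with a single regular expression. A minimal, hypothetical parser for the `step:N/10000 train_loss:... val_loss:...` summary lines (the pattern mirrors the log text above and is not part of the repository's tooling):

import re

# Matches e.g. "step:6500/10000 train_loss:2.1366 val_loss:2.1145"
SUMMARY = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_summaries(lines):
    """Yield (step, train_loss, val_loss) tuples from log lines; a sketch only."""
    for line in lines:
        m = SUMMARY.search(line)
        if m:
            yield int(m.group(1)), float(m.group(2)), float(m.group(3))

Fed the lines above, this would yield (5500, 2.1915, 2.1663), (6000, 2.1607, 2.1387), (6500, 2.1366, 2.1145), and so on.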
+[2025-09-05 22:38:57] [Rank 0] PRINT: step:7000/10000 train_loss:2.1133 val_loss:2.0982 train_time:297341ms step_avg:42.48ms
+[2025-09-05 22:38:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:38:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:40:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:40:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:40:19] [Rank 0] Total Loss: 4.7536
+[2025-09-05 22:40:19] [Rank 0] Total FTA (Unweighted): 0.3269
+[2025-09-05 22:40:19] [Rank 0] Total FTA (Weighted): 0.3269
+[2025-09-05 22:40:19] [Rank 0] Group 0 Loss: 3.5012
+[2025-09-05 22:40:19] [Rank 0] Group 1 Loss: 3.4109
+[2025-09-05 22:40:19] [Rank 0] Group 2 Loss: 3.4471
+[2025-09-05 22:40:19] [Rank 0] Group 3 Loss: 3.7780
+[2025-09-05 22:40:19] [Rank 0] Group 4 Loss: 4.1140
+[2025-09-05 22:40:19] [Rank 0] Group 5 Loss: 4.5248
+[2025-09-05 22:40:19] [Rank 0] Group 6 Loss: 4.8036
+[2025-09-05 22:40:19] [Rank 0] Group 7 Loss: 4.9744
+[2025-09-05 22:40:19] [Rank 0] Group 8 Loss: 5.2910
+[2025-09-05 22:40:19] [Rank 0] Group 9 Loss: 5.4002
+[2025-09-05 22:40:19] [Rank 0] Group 10 Loss: 5.5142
+[2025-09-05 22:40:19] [Rank 0] Group 11 Loss: 5.5052
+[2025-09-05 22:40:19] [Rank 0] Group 12 Loss: 5.4088
+[2025-09-05 22:40:19] [Rank 0] Group 13 Loss: 5.4561
+[2025-09-05 22:40:19] [Rank 0] Group 14 Loss: 5.4733
+[2025-09-05 22:40:19] [Rank 0] Group 15 Loss: 5.4553
+[2025-09-05 22:40:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:40:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:40:19] [Rank 0] Group 2 FTA: 0.6400
+[2025-09-05 22:40:19] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:40:19] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:40:19] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 22:40:19] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:40:19] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 22:40:19] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 22:40:19] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 22:40:19] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 22:40:19] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 22:40:19] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 22:40:19] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 22:40:19] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 22:40:19] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 22:40:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:40:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:40:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:40:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:40:20] [Rank 0] step:7001/10000 train_time:297350ms step_avg:42.47ms
+[2025-09-05 22:40:21] [Rank 0] step:7021/10000 train_time:298026ms step_avg:42.45ms
+[2025-09-05 22:40:22] [Rank 0] step:7041/10000 train_time:298765ms step_avg:42.43ms
+[2025-09-05 22:40:23] [Rank 0] step:7061/10000 train_time:299504ms step_avg:42.42ms
+[2025-09-05 22:40:23] [Rank 0] step:7081/10000 train_time:300243ms step_avg:42.40ms
+[2025-09-05 22:40:24] [Rank 0] step:7101/10000 train_time:300982ms step_avg:42.39ms
+[2025-09-05 22:40:25] [Rank 0] step:7121/10000 train_time:301721ms step_avg:42.37ms
+[2025-09-05 22:40:26] [Rank 0] step:7141/10000 train_time:302459ms step_avg:42.36ms
+[2025-09-05 22:40:26] [Rank 0] step:7161/10000 train_time:303197ms step_avg:42.34ms
+[2025-09-05 22:40:27] [Rank 0] step:7181/10000 train_time:303940ms step_avg:42.33ms
+[2025-09-05 22:40:28] [Rank 0] step:7201/10000 train_time:304677ms step_avg:42.31ms
+[2025-09-05 22:40:29] [Rank 0] step:7221/10000 train_time:305415ms step_avg:42.30ms
+[2025-09-05 22:40:29] [Rank 0] step:7241/10000 train_time:306154ms step_avg:42.28ms
+[2025-09-05 22:40:30] [Rank 0] step:7261/10000 train_time:306893ms step_avg:42.27ms
+[2025-09-05 22:40:31] [Rank 0] step:7281/10000 train_time:307632ms step_avg:42.25ms
+[2025-09-05 22:40:31] [Rank 0] step:7301/10000 train_time:308370ms step_avg:42.24ms
+[2025-09-05 22:40:32] [Rank 0] step:7321/10000 train_time:309109ms step_avg:42.22ms
+[2025-09-05 22:40:33] [Rank 0] step:7341/10000 train_time:309846ms step_avg:42.21ms
+[2025-09-05 22:40:34] [Rank 0] step:7361/10000 train_time:310585ms step_avg:42.19ms
+[2025-09-05 22:40:34] [Rank 0] step:7381/10000 train_time:311324ms step_avg:42.18ms
+[2025-09-05 22:40:35] [Rank 0] step:7401/10000 train_time:312062ms step_avg:42.16ms
+[2025-09-05 22:40:36] [Rank 0] step:7421/10000 train_time:312800ms step_avg:42.15ms
+[2025-09-05 22:40:37] [Rank 0] step:7441/10000 train_time:313539ms step_avg:42.14ms
+[2025-09-05 22:40:37] [Rank 0] step:7461/10000 train_time:314277ms step_avg:42.12ms
+[2025-09-05 22:40:38] [Rank 0] step:7481/10000 train_time:315015ms step_avg:42.11ms
+[2025-09-05 22:40:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:40:39] [Rank 0] PRINT: step:7500/10000 train_loss:2.0964 val_loss:2.0803 train_time:315834ms step_avg:42.11ms
+[2025-09-05 22:40:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:40:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:42:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:42:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:42:01] [Rank 0] Total Loss: 4.7376
+[2025-09-05 22:42:01] [Rank 0] Total FTA (Unweighted): 0.3344
+[2025-09-05 22:42:01] [Rank 0] Total FTA (Weighted): 0.3344
+[2025-09-05 22:42:01] [Rank 0] Group 0 Loss: 3.4844
+[2025-09-05 22:42:01] [Rank 0] Group 1 Loss: 3.4200
+[2025-09-05 22:42:01] [Rank 0] Group 2 Loss: 3.4276
+[2025-09-05 22:42:01] [Rank 0] Group 3 Loss: 3.8308
+[2025-09-05 22:42:01] [Rank 0] Group 4 Loss: 4.0833
+[2025-09-05 22:42:01] [Rank 0] Group 5 Loss: 4.4911
+[2025-09-05 22:42:01] [Rank 0] Group 6 Loss: 4.7918
+[2025-09-05 22:42:01] [Rank 0] Group 7 Loss: 4.9517
+[2025-09-05 22:42:01] [Rank 0] Group 8 Loss: 5.2643
+[2025-09-05 22:42:01] [Rank 0] Group 9 Loss: 5.3715
+[2025-09-05 22:42:01] [Rank 0] Group 10 Loss: 5.5019
+[2025-09-05 22:42:01] [Rank 0] Group 11 Loss: 5.4791
+[2025-09-05 22:42:01] [Rank 0] Group 12 Loss: 5.3801
+[2025-09-05 22:42:01] [Rank 0] Group 13 Loss: 5.4350
+[2025-09-05 22:42:01] [Rank 0] Group 14 Loss: 5.4564
+[2025-09-05 22:42:01] [Rank 0] Group 15 Loss: 5.4326
+[2025-09-05 22:42:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:42:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:42:01] [Rank 0] Group 2 FTA: 0.6200
+[2025-09-05 22:42:01] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:42:01] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:42:01] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 22:42:01] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:42:01] [Rank 0] Group 7 FTA: 0.1700
+[2025-09-05 22:42:01] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 22:42:01] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 22:42:01] [Rank 0] Group 10 FTA: 0.2500
+[2025-09-05 22:42:01] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 22:42:01] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 22:42:01] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 22:42:01] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 22:42:01] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 22:42:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:42:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:42:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:42:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:42:02] [Rank 0] step:7501/10000 train_time:315843ms step_avg:42.11ms
+[2025-09-05 22:42:03] [Rank 0] step:7521/10000 train_time:316517ms step_avg:42.08ms
+[2025-09-05 22:42:04] [Rank 0] step:7541/10000 train_time:317257ms step_avg:42.07ms
+[2025-09-05 22:42:05] [Rank 0] step:7561/10000 train_time:317996ms step_avg:42.06ms
+[2025-09-05 22:42:05] [Rank 0] step:7581/10000 train_time:318734ms step_avg:42.04ms
+[2025-09-05 22:42:06] [Rank 0] step:7601/10000 train_time:319473ms step_avg:42.03ms
+[2025-09-05 22:42:07] [Rank 0] step:7621/10000 train_time:320211ms step_avg:42.02ms
+[2025-09-05 22:42:08] [Rank 0] step:7641/10000 train_time:321592ms step_avg:42.09ms
+[2025-09-05 22:42:09] [Rank 0] step:7661/10000 train_time:322302ms step_avg:42.07ms
+[2025-09-05 22:42:10] [Rank 0] step:7681/10000 train_time:323041ms step_avg:42.06ms
+[2025-09-05 22:42:10] [Rank 0] step:7701/10000 train_time:323780ms step_avg:42.04ms
+[2025-09-05 22:42:11] [Rank 0] step:7721/10000 train_time:324519ms step_avg:42.03ms
+[2025-09-05 22:42:12] [Rank 0] step:7741/10000 train_time:325257ms step_avg:42.02ms
+[2025-09-05 22:42:13] [Rank 0] step:7761/10000 train_time:325996ms step_avg:42.00ms
+[2025-09-05 22:42:13] [Rank 0] step:7781/10000 train_time:326735ms step_avg:41.99ms
+[2025-09-05 22:42:14] [Rank 0] step:7801/10000 train_time:327475ms step_avg:41.98ms
+[2025-09-05 22:42:15] [Rank 0] step:7821/10000 train_time:328213ms step_avg:41.97ms
+[2025-09-05 22:42:15] [Rank 0] step:7841/10000 train_time:328954ms step_avg:41.95ms
+[2025-09-05 22:42:16] [Rank 0] step:7861/10000 train_time:329692ms step_avg:41.94ms
+[2025-09-05 22:42:17] [Rank 0] step:7881/10000 train_time:330431ms step_avg:41.93ms
+[2025-09-05 22:42:18] [Rank 0] step:7901/10000 train_time:331170ms step_avg:41.91ms
+[2025-09-05 22:42:18] [Rank 0] step:7921/10000 train_time:331908ms step_avg:41.90ms
+[2025-09-05 22:42:19] [Rank 0] step:7941/10000 train_time:332647ms step_avg:41.89ms
+[2025-09-05 22:42:20] [Rank 0] step:7961/10000 train_time:333386ms step_avg:41.88ms
+[2025-09-05 22:42:21] [Rank 0] step:7981/10000 train_time:334125ms step_avg:41.87ms
+[2025-09-05 22:42:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
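Editor's note: the timestamps bracket each detailed evaluation. The step-7500 pass starts at 22:40:39 and reports completion at 22:42:01, roughly 82 seconds for the 1600-sample fixed-eval set, and the same gap recurs at every 500-step evaluation in this log. A throwaway check of that arithmetic (the two timestamps are copied from the log; this is not part of the training script):

from datetime import datetime

start = datetime.strptime("22:40:39", "%H:%M:%S")
end = datetime.strptime("22:42:01", "%H:%M:%S")
print((end - start).total_seconds())  # 82.0 seconds per detailed evaluation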
+[2025-09-05 22:42:22] [Rank 0] PRINT: step:8000/10000 train_loss:2.0813 val_loss:2.0665 train_time:334944ms step_avg:41.87ms +[2025-09-05 22:42:22] [Rank 0] PRINT: step:8000/10000 train_loss:2.0813 val_loss:2.0665 train_time:334944ms step_avg:41.87ms +[2025-09-05 22:42:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:42:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 22:42:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:42:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 22:43:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:43:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 22:43:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:43:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 22:43:43] [Rank 0] Total Loss: 4.7068 +[2025-09-05 22:43:43] [Rank 0] Total Loss: 4.7068 +[2025-09-05 22:43:43] [Rank 0] Total FTA (Unweighted): 0.3381 +[2025-09-05 22:43:43] [Rank 0] Total FTA (Unweighted): 0.3381 +[2025-09-05 22:43:43] [Rank 0] Total FTA (Weighted): 0.3381 +[2025-09-05 22:43:43] [Rank 0] Total FTA (Weighted): 0.3381 +[2025-09-05 22:43:43] [Rank 0] Group 0 Loss: 3.4507 +[2025-09-05 22:43:43] [Rank 0] Group 0 Loss: 3.4507 +[2025-09-05 22:43:43] [Rank 0] Group 1 Loss: 3.3767 +[2025-09-05 22:43:43] [Rank 0] Group 1 Loss: 3.3767 +[2025-09-05 22:43:43] [Rank 0] Group 2 Loss: 3.3802 +[2025-09-05 22:43:43] [Rank 0] Group 2 Loss: 3.3802 +[2025-09-05 22:43:43] [Rank 0] Group 3 Loss: 3.7750 +[2025-09-05 22:43:43] [Rank 0] Group 3 Loss: 3.7750 +[2025-09-05 22:43:43] [Rank 0] Group 4 Loss: 4.0640 +[2025-09-05 22:43:43] [Rank 0] Group 4 Loss: 4.0640 +[2025-09-05 22:43:43] [Rank 0] Group 5 Loss: 4.4806 +[2025-09-05 22:43:43] [Rank 0] Group 5 Loss: 4.4806 +[2025-09-05 22:43:43] [Rank 0] Group 6 Loss: 4.7456 +[2025-09-05 22:43:43] [Rank 0] Group 6 Loss: 4.7456 +[2025-09-05 22:43:43] [Rank 0] Group 7 Loss: 4.9304 +[2025-09-05 22:43:43] [Rank 0] Group 7 Loss: 4.9304 +[2025-09-05 22:43:43] [Rank 0] Group 8 Loss: 5.2315 +[2025-09-05 22:43:43] [Rank 0] Group 8 Loss: 5.2315 +[2025-09-05 22:43:43] [Rank 0] Group 9 Loss: 5.3455 +[2025-09-05 22:43:43] [Rank 0] Group 9 Loss: 5.3455 +[2025-09-05 22:43:43] [Rank 0] Group 10 Loss: 5.4569 +[2025-09-05 22:43:43] [Rank 0] Group 10 Loss: 5.4569 +[2025-09-05 22:43:43] [Rank 0] Group 11 Loss: 5.4700 +[2025-09-05 22:43:43] [Rank 0] Group 11 Loss: 5.4700 +[2025-09-05 22:43:43] [Rank 0] Group 12 Loss: 5.3590 +[2025-09-05 22:43:43] [Rank 0] Group 12 Loss: 5.3590 +[2025-09-05 22:43:43] [Rank 0] Group 13 Loss: 5.4002 +[2025-09-05 22:43:43] [Rank 0] Group 13 Loss: 5.4002 +[2025-09-05 22:43:43] [Rank 0] Group 14 Loss: 5.4315 +[2025-09-05 22:43:43] [Rank 0] Group 14 Loss: 5.4315 +[2025-09-05 22:43:43] [Rank 0] Group 15 Loss: 5.4105 +[2025-09-05 22:43:43] [Rank 0] Group 15 Loss: 5.4105 +[2025-09-05 22:43:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:43:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 22:43:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:43:43] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 22:43:43] [Rank 0] Group 2 FTA: 0.7000 +[2025-09-05 22:43:43] [Rank 0] Group 2 FTA: 0.7000 +[2025-09-05 22:43:43] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:43:43] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 22:43:43] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:43:43] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 22:43:43] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 22:43:43] [Rank 0] Group 5 FTA: 
0.3000 +[2025-09-05 22:43:43] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:43:43] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 22:43:43] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 22:43:43] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 22:43:43] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 22:43:43] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 22:43:43] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:43:43] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 22:43:43] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:43:43] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 22:43:43] [Rank 0] Group 11 FTA: 0.2100 +[2025-09-05 22:43:43] [Rank 0] Group 11 FTA: 0.2100 +[2025-09-05 22:43:43] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-05 22:43:43] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-05 22:43:43] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 22:43:43] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 22:43:43] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 22:43:43] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 22:43:43] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 22:43:43] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 22:43:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png +[2025-09-05 22:43:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png +[2025-09-05 22:43:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png +[2025-09-05 22:43:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png +[2025-09-05 22:43:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png +[2025-09-05 22:43:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png +[2025-09-05 22:43:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png +[2025-09-05 22:43:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png +[2025-09-05 22:43:45] [Rank 0] step:8001/10000 train_time:334953ms step_avg:41.86ms +[2025-09-05 22:43:45] [Rank 0] step:8001/10000 train_time:334953ms step_avg:41.86ms +[2025-09-05 22:43:46] [Rank 0] step:8021/10000 train_time:336250ms step_avg:41.92ms +[2025-09-05 22:43:46] [Rank 0] step:8021/10000 train_time:336250ms step_avg:41.92ms +[2025-09-05 22:43:47] [Rank 0] step:8041/10000 train_time:336989ms step_avg:41.91ms +[2025-09-05 22:43:47] [Rank 0] step:8041/10000 train_time:336989ms step_avg:41.91ms +[2025-09-05 22:43:48] [Rank 0] step:8061/10000 train_time:337729ms step_avg:41.90ms +[2025-09-05 22:43:48] [Rank 0] step:8061/10000 train_time:337729ms step_avg:41.90ms +[2025-09-05 22:43:48] [Rank 0] step:8081/10000 train_time:338469ms step_avg:41.88ms +[2025-09-05 22:43:48] [Rank 0] step:8081/10000 train_time:338469ms step_avg:41.88ms +[2025-09-05 22:43:49] [Rank 0] step:8101/10000 train_time:339208ms step_avg:41.87ms +[2025-09-05 22:43:49] [Rank 0] step:8101/10000 train_time:339208ms step_avg:41.87ms +[2025-09-05 22:43:50] [Rank 0] step:8121/10000 train_time:339947ms step_avg:41.86ms +[2025-09-05 22:43:50] 
+[2025-09-05 22:43:51] [Rank 0] step:8141/10000 train_time:340685ms step_avg:41.85ms
+[2025-09-05 22:43:51] [Rank 0] step:8161/10000 train_time:341425ms step_avg:41.84ms
+[2025-09-05 22:43:52] [Rank 0] step:8181/10000 train_time:342165ms step_avg:41.82ms
+[2025-09-05 22:43:53] [Rank 0] step:8201/10000 train_time:342904ms step_avg:41.81ms
+[2025-09-05 22:43:54] [Rank 0] step:8221/10000 train_time:343643ms step_avg:41.80ms
+[2025-09-05 22:43:54] [Rank 0] step:8241/10000 train_time:344382ms step_avg:41.79ms
+[2025-09-05 22:43:55] [Rank 0] step:8261/10000 train_time:345121ms step_avg:41.78ms
+[2025-09-05 22:43:56] [Rank 0] step:8281/10000 train_time:345861ms step_avg:41.77ms
+[2025-09-05 22:43:57] [Rank 0] step:8301/10000 train_time:346600ms step_avg:41.75ms
+[2025-09-05 22:43:57] [Rank 0] step:8321/10000 train_time:347339ms step_avg:41.74ms
+[2025-09-05 22:43:58] [Rank 0] step:8341/10000 train_time:348079ms step_avg:41.73ms
+[2025-09-05 22:43:59] [Rank 0] step:8361/10000 train_time:348817ms step_avg:41.72ms
+[2025-09-05 22:44:00] [Rank 0] step:8381/10000 train_time:349557ms step_avg:41.71ms
+[2025-09-05 22:44:00] [Rank 0] step:8401/10000 train_time:350296ms step_avg:41.70ms
+[2025-09-05 22:44:01] [Rank 0] step:8421/10000 train_time:351034ms step_avg:41.69ms
+[2025-09-05 22:44:02] [Rank 0] step:8441/10000 train_time:351888ms step_avg:41.69ms
+[2025-09-05 22:44:03] [Rank 0] step:8461/10000 train_time:352627ms step_avg:41.68ms
+[2025-09-05 22:44:03] [Rank 0] step:8481/10000 train_time:353367ms step_avg:41.67ms
+[2025-09-05 22:44:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
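The warning's arithmetic: 491520 / 65536 = 7.5, so an integer number of full validation batches covers only 7 × 65536 = 458752 tokens. A minimal sketch of that bookkeeping (variable names are illustrative, assuming the loop simply drops the final partial batch, which is what the message suggests):

val_tokens, val_batch_size = 491520, 65536
full_batches = val_tokens // val_batch_size   # 7 full batches
evaluated = full_batches * val_batch_size     # 458752 tokens actually evaluated
missed = val_tokens - evaluated               # 32768 tokens skipped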
+[2025-09-05 22:44:05] [Rank 0] PRINT: step:8500/10000 train_loss:2.0683 val_loss:2.0528 train_time:354318ms step_avg:41.68ms
+[2025-09-05 22:44:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:44:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:45:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:45:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:45:27] [Rank 0] Total Loss: 4.6966
+[2025-09-05 22:45:27] [Rank 0] Total FTA (Unweighted): 0.3356
+[2025-09-05 22:45:27] [Rank 0] Total FTA (Weighted): 0.3356
+[2025-09-05 22:45:27] [Rank 0] Group 0 Loss: 3.4998
+[2025-09-05 22:45:27] [Rank 0] Group 1 Loss: 3.4097
+[2025-09-05 22:45:27] [Rank 0] Group 2 Loss: 3.3816
+[2025-09-05 22:45:27] [Rank 0] Group 3 Loss: 3.7628
+[2025-09-05 22:45:27] [Rank 0] Group 4 Loss: 4.0557
+[2025-09-05 22:45:27] [Rank 0] Group 5 Loss: 4.4567
+[2025-09-05 22:45:27] [Rank 0] Group 6 Loss: 4.7245
+[2025-09-05 22:45:27] [Rank 0] Group 7 Loss: 4.8976
+[2025-09-05 22:45:27] [Rank 0] Group 8 Loss: 5.2198
+[2025-09-05 22:45:27] [Rank 0] Group 9 Loss: 5.3299
+[2025-09-05 22:45:27] [Rank 0] Group 10 Loss: 5.4463
+[2025-09-05 22:45:27] [Rank 0] Group 11 Loss: 5.4229
+[2025-09-05 22:45:27] [Rank 0] Group 12 Loss: 5.3359
+[2025-09-05 22:45:27] [Rank 0] Group 13 Loss: 5.3921
+[2025-09-05 22:45:27] [Rank 0] Group 14 Loss: 5.4191
+[2025-09-05 22:45:27] [Rank 0] Group 15 Loss: 5.3905
+[2025-09-05 22:45:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:45:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:45:27] [Rank 0] Group 2 FTA: 0.6100
+[2025-09-05 22:45:27] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:45:27] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:45:27] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 22:45:27] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:45:27] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 22:45:27] [Rank 0] Group 8 FTA: 0.2500
+[2025-09-05 22:45:27] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 22:45:27] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-05 22:45:27] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 22:45:27] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 22:45:27] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 22:45:27] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 22:45:27] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 22:45:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:45:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:45:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:45:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:45:29] [Rank 0] step:8501/10000 train_time:354327ms step_avg:41.68ms
+[2025-09-05 22:45:29] [Rank 0] step:8521/10000 train_time:355003ms step_avg:41.66ms
+[2025-09-05 22:45:30] [Rank 0] step:8541/10000 train_time:355741ms step_avg:41.65ms
+[2025-09-05 22:45:31] [Rank 0] step:8561/10000 train_time:356479ms step_avg:41.64ms
+[2025-09-05 22:45:31] [Rank 0] step:8581/10000 train_time:357219ms step_avg:41.63ms
+[2025-09-05 22:45:32] [Rank 0] step:8601/10000 train_time:357957ms step_avg:41.62ms
+[2025-09-05 22:45:33] [Rank 0] step:8621/10000 train_time:358696ms step_avg:41.61ms
+[2025-09-05 22:45:34] [Rank 0] step:8641/10000 train_time:359434ms step_avg:41.60ms
+[2025-09-05 22:45:34] [Rank 0] step:8661/10000 train_time:360172ms step_avg:41.59ms
+[2025-09-05 22:45:35] [Rank 0] step:8681/10000 train_time:360911ms step_avg:41.57ms
+[2025-09-05 22:45:36] [Rank 0] step:8701/10000 train_time:361650ms step_avg:41.56ms
+[2025-09-05 22:45:37] [Rank 0] step:8721/10000 train_time:362389ms step_avg:41.55ms
+[2025-09-05 22:45:37] [Rank 0] step:8741/10000 train_time:363128ms step_avg:41.54ms
+[2025-09-05 22:45:38] [Rank 0] step:8761/10000 train_time:363868ms step_avg:41.53ms
+[2025-09-05 22:45:39] [Rank 0] step:8781/10000 train_time:364606ms step_avg:41.52ms
+[2025-09-05 22:45:40] [Rank 0] step:8801/10000 train_time:365344ms step_avg:41.51ms
+[2025-09-05 22:45:40] [Rank 0] step:8821/10000 train_time:366083ms step_avg:41.50ms
+[2025-09-05 22:45:41] [Rank 0] step:8841/10000 train_time:367021ms step_avg:41.51ms
+[2025-09-05 22:45:42] [Rank 0] step:8861/10000 train_time:367760ms step_avg:41.50ms
+[2025-09-05 22:45:43] [Rank 0] step:8881/10000 train_time:368502ms step_avg:41.49ms
+[2025-09-05 22:45:44] [Rank 0] step:8901/10000 train_time:369241ms step_avg:41.48ms
+[2025-09-05 22:45:44] [Rank 0] step:8921/10000 train_time:369980ms step_avg:41.47ms
+[2025-09-05 22:45:45] [Rank 0] step:8941/10000 train_time:370717ms step_avg:41.46ms
+[2025-09-05 22:45:46] [Rank 0] step:8961/10000 train_time:371455ms step_avg:41.45ms
+[2025-09-05 22:45:46] [Rank 0] step:8981/10000 train_time:372197ms step_avg:41.44ms
+[2025-09-05 22:45:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:45:48] [Rank 0] PRINT: step:9000/10000 train_loss:2.0552 val_loss:2.0418 train_time:373017ms step_avg:41.45ms
+[2025-09-05 22:45:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:45:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:47:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:47:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:47:09] [Rank 0] Total Loss: 4.7039
+[2025-09-05 22:47:09] [Rank 0] Total FTA (Unweighted): 0.3525
+[2025-09-05 22:47:09] [Rank 0] Total FTA (Weighted): 0.3525
+[2025-09-05 22:47:09] [Rank 0] Group 0 Loss: 3.5233
+[2025-09-05 22:47:09] [Rank 0] Group 1 Loss: 3.4124
+[2025-09-05 22:47:09] [Rank 0] Group 2 Loss: 3.4205
+[2025-09-05 22:47:09] [Rank 0] Group 3 Loss: 3.7846
+[2025-09-05 22:47:09] [Rank 0] Group 4 Loss: 4.0484
+[2025-09-05 22:47:09] [Rank 0] Group 5 Loss: 4.4604
+[2025-09-05 22:47:09] [Rank 0] Group 6 Loss: 4.7428
+[2025-09-05 22:47:09] [Rank 0] Group 7 Loss: 4.9171
+[2025-09-05 22:47:09] [Rank 0] Group 8 Loss: 5.2210
+[2025-09-05 22:47:09] [Rank 0] Group 9 Loss: 5.3268
+[2025-09-05 22:47:09] [Rank 0] Group 10 Loss: 5.4505
+[2025-09-05 22:47:09] [Rank 0] Group 11 Loss: 5.4380
+[2025-09-05 22:47:09] [Rank 0] Group 12 Loss: 5.3349
+[2025-09-05 22:47:09] [Rank 0] Group 13 Loss: 5.3808
+[2025-09-05 22:47:09] [Rank 0] Group 14 Loss: 5.4126
+[2025-09-05 22:47:09] [Rank 0] Group 15 Loss: 5.3889
+[2025-09-05 22:47:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:47:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:47:09] [Rank 0] Group 2 FTA: 0.7800
+[2025-09-05 22:47:09] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:47:09] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:47:09] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 22:47:09] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:47:09] [Rank 0] Group 7 FTA: 0.1700
+[2025-09-05 22:47:09] [Rank 0] Group 8 FTA: 0.2400
+[2025-09-05 22:47:09] [Rank 0] Group 9 FTA: 0.1800
+[2025-09-05 22:47:09] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 22:47:09] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 22:47:09] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 22:47:09] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 22:47:09] [Rank 0] Group 14 FTA: 0.2200
+[2025-09-05 22:47:09] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 22:47:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:47:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:47:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:47:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:47:11] [Rank 0] step:9001/10000 train_time:373026ms step_avg:41.44ms
+[2025-09-05 22:47:12] [Rank 0] step:9021/10000 train_time:373803ms step_avg:41.44ms
+[2025-09-05 22:47:12] [Rank 0] step:9041/10000 train_time:374542ms step_avg:41.43ms
+[2025-09-05 22:47:13] [Rank 0] step:9061/10000 train_time:375281ms step_avg:41.42ms
+[2025-09-05 22:47:14] [Rank 0] step:9081/10000 train_time:376158ms step_avg:41.42ms
+[2025-09-05 22:47:15] [Rank 0] step:9101/10000 train_time:376896ms step_avg:41.41ms
+[2025-09-05 22:47:15] [Rank 0] step:9121/10000 train_time:377635ms step_avg:41.40ms
+[2025-09-05 22:47:16] [Rank 0] step:9141/10000 train_time:378372ms step_avg:41.39ms
+[2025-09-05 22:47:17] [Rank 0] step:9161/10000 train_time:379111ms step_avg:41.38ms
+[2025-09-05 22:47:18] [Rank 0] step:9181/10000 train_time:379850ms step_avg:41.37ms
+[2025-09-05 22:47:18] [Rank 0] step:9201/10000 train_time:380590ms step_avg:41.36ms
+[2025-09-05 22:47:19] [Rank 0] step:9221/10000 train_time:381329ms step_avg:41.35ms
+[2025-09-05 22:47:20] [Rank 0] step:9241/10000 train_time:382068ms step_avg:41.34ms
+[2025-09-05 22:47:21] [Rank 0] step:9261/10000 train_time:382807ms step_avg:41.34ms
+[2025-09-05 22:47:21] [Rank 0] step:9281/10000 train_time:383547ms step_avg:41.33ms
+[2025-09-05 22:47:22] [Rank 0] step:9301/10000 train_time:384329ms step_avg:41.32ms
+[2025-09-05 22:47:23] [Rank 0] step:9321/10000 train_time:385069ms step_avg:41.31ms
+[2025-09-05 22:47:24] [Rank 0] step:9341/10000 train_time:385809ms step_avg:41.30ms
+[2025-09-05 22:47:24] [Rank 0] step:9361/10000 train_time:386548ms step_avg:41.29ms
+[2025-09-05 22:47:25] [Rank 0] step:9381/10000 train_time:387288ms step_avg:41.28ms
+[2025-09-05 22:47:26] [Rank 0] step:9401/10000 train_time:388027ms step_avg:41.28ms
+[2025-09-05 22:47:27] [Rank 0] step:9421/10000 train_time:388766ms step_avg:41.27ms
+[2025-09-05 22:47:27] [Rank 0] step:9441/10000 train_time:389505ms step_avg:41.26ms
+[2025-09-05 22:47:28] [Rank 0] step:9461/10000 train_time:390244ms step_avg:41.25ms
+[2025-09-05 22:47:29] [Rank 0] step:9481/10000 train_time:390981ms step_avg:41.24ms
+[2025-09-05 22:47:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:47:30] [Rank 0] PRINT: step:9500/10000 train_loss:2.0448 val_loss:2.0330 train_time:391800ms step_avg:41.24ms
+[2025-09-05 22:47:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:47:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:48:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:48:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:48:52] [Rank 0] Total Loss: 4.7027
+[2025-09-05 22:48:52] [Rank 0] Total FTA (Unweighted): 0.3600
+[2025-09-05 22:48:52] [Rank 0] Total FTA (Weighted): 0.3600
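Unweighted and weighted total FTA agree to four decimals throughout these runs because the fixed eval set is balanced: per_group_k = 100 samples for each of the 16 groups (the 1600 samples loaded above), so a sample-weighted mean over groups reduces to the plain mean of the 16 group accuracies. A minimal sketch under that equal-size assumption (values illustrative):

group_fta = [1.00, 1.00, 0.88, 0.17]          # per-group accuracies (subset shown)
group_n = [100] * len(group_fta)              # per_group_k = 100 for every group
unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_n)) / sum(group_n)
assert abs(unweighted - weighted) < 1e-9      # equal only because all groups are the same size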
+[2025-09-05 22:48:52] [Rank 0] Group 0 Loss: 3.5069
+[2025-09-05 22:48:52] [Rank 0] Group 1 Loss: 3.4165
+[2025-09-05 22:48:52] [Rank 0] Group 2 Loss: 3.4181
+[2025-09-05 22:48:52] [Rank 0] Group 3 Loss: 3.7935
+[2025-09-05 22:48:52] [Rank 0] Group 4 Loss: 4.0509
+[2025-09-05 22:48:52] [Rank 0] Group 5 Loss: 4.4468
+[2025-09-05 22:48:52] [Rank 0] Group 6 Loss: 4.7328
+[2025-09-05 22:48:52] [Rank 0] Group 7 Loss: 4.9083
+[2025-09-05 22:48:52] [Rank 0] Group 8 Loss: 5.2095
+[2025-09-05 22:48:52] [Rank 0] Group 9 Loss: 5.3321
+[2025-09-05 22:48:52] [Rank 0] Group 10 Loss: 5.4633
+[2025-09-05 22:48:52] [Rank 0] Group 11 Loss: 5.4354
+[2025-09-05 22:48:52] [Rank 0] Group 12 Loss: 5.3416
+[2025-09-05 22:48:52] [Rank 0] Group 13 Loss: 5.3882
+[2025-09-05 22:48:52] [Rank 0] Group 14 Loss: 5.4080
+[2025-09-05 22:48:52] [Rank 0] Group 15 Loss: 5.3907
+[2025-09-05 22:48:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:48:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:48:52] [Rank 0] Group 2 FTA: 0.8800
+[2025-09-05 22:48:52] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 22:48:52] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 22:48:52] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 22:48:52] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:48:52] [Rank 0] Group 7 FTA: 0.1800
+[2025-09-05 22:48:52] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 22:48:52] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 22:48:52] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 22:48:52] [Rank 0] Group 11 FTA: 0.2400
+[2025-09-05 22:48:52] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 22:48:52] [Rank 0] Group 13 FTA: 0.2600
+[2025-09-05 22:48:52] [Rank 0] Group 14 FTA: 0.2000
+[2025-09-05 22:48:52] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 22:48:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:48:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:48:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:48:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:48:53] [Rank 0] step:9501/10000 train_time:391809ms step_avg:41.24ms
+[2025-09-05 22:48:54] [Rank 0] step:9521/10000 train_time:392485ms step_avg:41.22ms
+[2025-09-05 22:48:55] [Rank 0] step:9541/10000 train_time:393223ms step_avg:41.21ms
+[2025-09-05 22:48:55] [Rank 0] step:9561/10000 train_time:393962ms step_avg:41.21ms
+[2025-09-05 22:48:56] [Rank 0] step:9581/10000 train_time:394701ms step_avg:41.20ms
+[2025-09-05 22:48:57] [Rank 0] step:9601/10000 train_time:395440ms step_avg:41.19ms
+[2025-09-05 22:48:58] [Rank 0] step:9621/10000 train_time:396179ms step_avg:41.18ms
+[2025-09-05 22:48:58] [Rank 0] step:9641/10000 train_time:396918ms step_avg:41.17ms
+[2025-09-05 22:48:59] [Rank 0] step:9661/10000 train_time:397932ms step_avg:41.19ms
+[2025-09-05 22:49:00] [Rank 0] step:9681/10000 train_time:398670ms step_avg:41.18ms
+[2025-09-05 22:49:01] [Rank 0] step:9701/10000 train_time:399408ms step_avg:41.17ms
+[2025-09-05 22:49:02] [Rank 0] step:9721/10000 train_time:400145ms step_avg:41.16ms
+[2025-09-05 22:49:02] [Rank 0] step:9741/10000 train_time:400885ms step_avg:41.15ms
+[2025-09-05 22:49:03] [Rank 0] step:9761/10000 train_time:401623ms step_avg:41.15ms
+[2025-09-05 22:49:04] [Rank 0] step:9781/10000 train_time:402363ms step_avg:41.14ms
+[2025-09-05 22:49:05] [Rank 0] step:9801/10000 train_time:403102ms step_avg:41.13ms
+[2025-09-05 22:49:05] [Rank 0] step:9821/10000 train_time:403841ms step_avg:41.12ms
+[2025-09-05 22:49:06] [Rank 0] step:9841/10000 train_time:404579ms step_avg:41.11ms
+[2025-09-05 22:49:07] [Rank 0] step:9861/10000 train_time:405318ms step_avg:41.10ms
+[2025-09-05 22:49:07] [Rank 0] step:9881/10000 train_time:406057ms step_avg:41.09ms
+[2025-09-05 22:49:08] [Rank 0] step:9901/10000 train_time:406795ms step_avg:41.09ms
+[2025-09-05 22:49:09] [Rank 0] step:9921/10000 train_time:407534ms step_avg:41.08ms
+[2025-09-05 22:49:10] [Rank 0] step:9941/10000 train_time:408273ms step_avg:41.07ms
+[2025-09-05 22:49:10] [Rank 0] step:9961/10000 train_time:409011ms step_avg:41.06ms
+[2025-09-05 22:49:11] [Rank 0] step:9981/10000 train_time:409750ms step_avg:41.05ms
+[2025-09-05 22:49:12] [Rank 0] step:10000/10000 train_time:410453ms step_avg:41.05ms
+[2025-09-05 22:49:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:49:12] [Rank 0] PRINT: step:10000/10000 train_loss:2.0367 val_loss:2.0252 train_time:410578ms step_avg:41.06ms
+[2025-09-05 22:49:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:49:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:50:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:50:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:50:34] [Rank 0] Total Loss: 4.6837
+[2025-09-05 22:50:34] [Rank 0] Total FTA (Unweighted): 0.3613
+[2025-09-05 22:50:34] [Rank 0] Total FTA (Weighted): 0.3613
+[2025-09-05 22:50:34] [Rank 0] Group 0 Loss: 3.5349
+[2025-09-05 22:50:34] [Rank 0] Group 1 Loss: 3.3954
+[2025-09-05 22:50:34] [Rank 0] Group 2 Loss: 3.3717
+[2025-09-05 22:50:34] [Rank 0] Group 3 Loss: 3.7712
+[2025-09-05 22:50:34] [Rank 0] Group 4 Loss: 4.0550
+[2025-09-05 22:50:34] [Rank 0] Group 5 Loss: 4.4312
+[2025-09-05 22:50:34] [Rank 0] Group 6 Loss: 4.7139
+[2025-09-05 22:50:34] [Rank 0] Group 7 Loss: 4.8831
+[2025-09-05 22:50:34] [Rank 0] Group 8 Loss: 5.2016
+[2025-09-05 22:50:34] [Rank 0] Group 9 Loss: 5.2979
+[2025-09-05 22:50:34] [Rank 0] Group 10 Loss: 5.4297
+[2025-09-05 22:50:34] [Rank 0] Group 11 Loss: 5.4135
+[2025-09-05 22:50:34] [Rank 0] Group 12 Loss: 5.3095
+[2025-09-05 22:50:34] [Rank 0] Group 13 Loss: 5.3709
+[2025-09-05 22:50:34] [Rank 0] Group 14 Loss: 5.3889
+[2025-09-05 22:50:34] [Rank 0] Group 15 Loss: 5.3710
+[2025-09-05 22:50:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:50:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 22:50:34] [Rank 0] Group 2 FTA: 0.7800
+[2025-09-05 22:50:34] [Rank 0] Group 3 FTA: 0.2000
+[2025-09-05 22:50:34] [Rank 0] Group 4 FTA: 0.2600
+[2025-09-05 22:50:34] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 22:50:34] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 22:50:34] [Rank 0] Group 7 FTA: 0.1900
+[2025-09-05 22:50:34] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 22:50:34] [Rank 0] Group 9 FTA: 0.1900
+[2025-09-05 22:50:34] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 22:50:34] [Rank 0] Group 11 FTA: 0.2300
+[2025-09-05 22:50:34] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 22:50:34] [Rank 0] Group 13 FTA: 0.2800
+[2025-09-05 22:50:34] [Rank 0] Group 14 FTA: 0.2200
+[2025-09-05 22:50:34] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-05 22:50:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_loss_curves.png
+[2025-09-05 22:50:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/per_class_acc_curves.png
+[2025-09-05 22:50:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_loss_curve.png
+[2025-09-05 22:50:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_45/total_acc_curve.png
+[2025-09-05 22:50:36] [Rank 0] step:10001/10000 train_time:410587ms step_avg:41.05ms
+[2025-09-05 22:50:36] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 22:50:36 2025 ---
+[2025-09-05 22:50:36] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5fbf80fcf0a7c1a81d72c779535d4327900e13c5
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/config.json
@@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 
46, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.1, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "544fe31d-6bd6-40a1-8bd8-bfa6cc9d4269", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..2ff10a4fb27e410e9b72e0f8e6b574782d21dbce --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e788bbe2b1726f06b62200ebf1315e5b78010a3989054815661d69dd2093ea2 +size 340744 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..c775d696cc33ac08b5c4ef29128c1446da7119d3 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58cd53260e6d7809209216fe3386353207937e842383117354a4b7ed93f034c7 +size 413654 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..b072ee7263799eac6a32137b41878581380f2183 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c3a998c4c92c160408fa3f482517507ef7fd0eac884b7394bad0189b25425fa0 +size 90946 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..770d4af35e46df605a8ffb5b560e249ddc59c356 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39089f10fb9a1282a7c57baf0692874a58614773367d4f85c9ee562725bc4039 +size 114797 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/training_log_544fe31d-6bd6-40a1-8bd8-bfa6cc9d4269.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/training_log_544fe31d-6bd6-40a1-8bd8-bfa6cc9d4269.txt new file mode 100644 index 0000000000000000000000000000000000000000..5e1e3e79cd5e72cc9e8efe914c9e5975584a59f5 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/training_log_544fe31d-6bd6-40a1-8bd8-bfa6cc9d4269.txt @@ -0,0 +1,5614 @@ +[2025-09-05 22:51:01] [Rank 0] PRINT: --- Script Start: Fri Sep 5 22:51:01 2025 --- +[2025-09-05 22:51:01] [Rank 0] PRINT: --- Script Start: Fri Sep 5 22:51:01 2025 --- +[2025-09-05 22:51:01] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=46, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 22:51:01] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.1, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 22:51:01] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 22:51:01] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 22:51:01] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-05 22:51:01] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-05 22:51:01] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46 +[2025-09-05 22:51:01] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46 +[2025-09-05 22:51:01] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
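+                         # modes 9-16 below mirror the optimizer-mode chain later in the script; 11-12 are unsupported
+                         "9: SGD+momentum on ALL parameters (uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QK,V Attn); "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QK,V Attn, W_1 MLP); "
+                         "14: Muon(O Attn)/Adam(QK,V Attn, MLP); "
+                         "15: Muon(V Attn)/Adam(QK,O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."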
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
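for device.
+
+# Editorial sketch (not part of the original script; the helper name is ours):
+# how the Hyperparameters above translate into loop sizes and the LR schedule
+# used later by get_lr. world_size is an assumption here; the run's actual
+# value is not recorded in this log.
+def _budget_sketch(world_size_assumed: int = 8):
+    steps_at_full_lr = int((1 - 0.8) * 10000)      # cooldown_frac=0.8 -> 2000 constant-LR steps,
+                                                   # then LR decays linearly to 0.1x at step 10000
+    val_batch_tokens = world_size_assumed * 16384  # world_size * val_seq_len
+    val_steps = 491520 // val_batch_tokens         # val_tokens // batch -> 3 (30 on a single GPU)
+    leftover = 491520 % val_batch_tokens           # 98304 -> triggers the divisibility warning below
+    return steps_at_full_lr, val_steps, leftover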
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append exactly once; logfile is only set on the master process
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
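+
+# Editorial sketch (the helper name is ours): what the power-law schedule
+# defined just below in generate_powerlaw_selection_counts produces for a
+# small m. For m = 3: group 0 -> 1 class x 8 samples; group 1 -> 1 x 4;
+# group 2 -> 2 x 2; group 3 -> 4 x 1. Group 0 holds 2^m samples and every
+# group g >= 1 holds 2^(m-1) samples in total, split across 2^(g-1)
+# increasingly rare classes.
+def _powerlaw_demo(m: int = 3):
+    counts, groups = generate_powerlaw_selection_counts(m)  # defined below, called lazily
+    per_group = defaultdict(int)
+    for cid, gid in zip(counts, groups):
+        per_group[gid] += counts[cid]
+    return dict(per_group)  # {0: 8, 1: 4, 2: 4, 3: 4} for m=3
+# ... 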
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
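Evaluation loop. (Editorial sketch of the first-token-accuracy check applied
+    # to each sample below: the text "...? Answer: X" is split by regex into
+    # prompt and answer, and a hit is counted when the argmax logit at the
+    # last prompt token equals the first token of " X".
+    #     m = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', text, re.IGNORECASE)
+    #     prompt, answer = (s.strip() for s in m.groups())
+    #     expected = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+    #     pos = len(tokenizer.encode(prompt, add_special_tokens=False)) - 1
+    #     hit = logits.squeeze(0)[pos].argmax().item() == expected
+    # )
+    # 3. 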
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
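Aggregate results. (Editorial example of the two accuracy totals computed
+    # below: with two groups scoring 90/100 and 1/10 on the fixed eval set,
+    #   total_acc_weighted   = (90 + 1) / (100 + 10) ~= 0.827  # sample-weighted
+    #   total_acc_unweighted = (0.90 + 0.10) / 2      = 0.50   # simple mean over groups
+    # the run reports the unweighted mean as 'total_acc'.)
+    # 4. 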
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
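+    # Editorial reference (not in the original script): summary of the
+    # Muon/Adam split per optimizer_mode as implemented by this chain.
+    # Embeddings, lm_head and scalars always stay on Adam; 11-12 are unused.
+    #   mode  Muon                      Adam (matrices)
+    #   0     QKVO attn + all MLP       -
+    #   1     QK attn                   VO attn + MLP
+    #   2     VO attn                   QK attn + MLP
+    #   3     QKVO attn                 MLP
+    #   4     MLP                       QKVO attn
+    #   5     -                         everything
+    #   6     MLP W2                    attn + MLP W1
+    #   7     VO attn + MLP             QK attn
+    #   8     VO attn + MLP W2          QK attn + MLP W1
+    #   9     (pure SGD+momentum on all parameters)
+    #   10    O attn + MLP              QK + V attn
+    #   13    O attn + MLP W2           QK + V attn + MLP W1
+    #   14    O attn                    QK + V attn + MLP
+    #   15    V attn                    QK + O attn + MLP
+    #   16    QKV attn                  O attn + MLP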
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr ),
+            dict(params=embed_params, lr=exp_args.adam_lr ),
+            dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
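+    # Editorial note: with the gated parameterization the MLP has three weight
+    # matrices (c_fc, c_up, c_proj); mlp_w1_group above is c_fc + c_up and
+    # mlp_w2_group is c_proj, so e.g. mode 8 hands Muon only c_proj while both
+    # input projections stay on Adam. A common gated-MLP forward consistent
+    # with this grouping (an assumption; models.nano_GPT_gated may differ):
+    #     h = F.silu(x @ c_fc.weight.T) * (x @ c_up.weight.T)
+    #     y = h @ c_proj.weight.T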
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr ),
+            dict(params=embed_params, lr=exp_args.adam_lr ),
+            dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; muon_lr is only bound in the qkvo branch, so read the CLI value directly here
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 22:51:01] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
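# A minimal writer sketch for the .bin shard format that _load_data_shard
# expects: a 256-entry int32 header (magic 20240520, version 1, token count)
# followed by the raw uint16 token ids. write_shard is a hypothetical helper,
# not part of the original pipeline.
import numpy as np

def write_shard(path, token_ids):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520          # magic number checked by the loader
    header[1] = 1                 # format version checked by the loader
    header[2] = len(token_ids)    # num_tokens, read back as header[2]
    with open(path, "wb") as f:
        f.write(header.tobytes()) # 256 * 4 bytes, matching the loader's seek
        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())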
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message 
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
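# Worked example of generate_powerlaw_selection_counts for m = 3 (the run uses
# m = 15; a small m keeps the numbers readable). Group g contributes
# 1 class if g == 0 else 2**(g-1) classes, each with 2**(m-g) samples:
counts, groups = generate_powerlaw_selection_counts(3)
assert counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
assert groups == [0, 1, 2, 2, 3, 3, 3, 3]  # class_id -> group_id, in order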
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
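# Numeric illustration of the two FTA aggregates computed below: with two
# groups, 90/100 correct in group A and 1/10 correct in group B,
#   weighted   = (90 + 1) / (100 + 10) ~= 0.827  (sample-weighted)
#   unweighted = (0.90 + 0.10) / 2      = 0.500  (simple mean over groups)
# so the unweighted variant keeps rare tail groups from being drowned out.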
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
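# Padding arithmetic used by both evaluation loops, for reference: sequences
# are rounded up to multiples of BLOCK_SIZE = 128 and capped at 4096, e.g.
#   ((300 + 127) // 128) * 128   -> 384   (window_blocks = 384 // 128 = 3)
#   ((4100 + 127) // 128) * 128  -> 4224  -> capped to max_eval_len = 4096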
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
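# Optional sanity-check sketch (not in the original script): after the rank-0
# broadcast above, every rank should hold bit-identical weights.
with torch.no_grad():
    for p in model.parameters():
        ref = p.detach().clone()
        dist.broadcast(ref, 0)  # fetch rank 0's copy again
        assert torch.equal(ref, p), "rank diverged from rank 0 at init"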
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
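# Size check for the groups collected above, assuming every one of the 12
# blocks in the qkvo model has both attn and mlp modules: q/k/v/o each
# contribute one matrix per block, so attn_qk_group holds 24 tensors,
# attn_vo_group 24, all_attn_matrices 48, and each MLP group 12.
assert len(all_attn_matrices) == 4 * len(model.blocks)
assert len(all_mlp_matrices) == 2 * len(model.blocks)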
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: # Muon on W_2 MLP, W_O Attn + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
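# Reference table of the Muon/Adam split per optimizer_mode (the qkvo and
# gated branches implement the same mapping; embeddings, lm_head and scalar
# parameters always go to Adam, and Adam also takes the complement of Muon's
# matrices):
#   0: Muon QKVO+MLP   1: Muon QK        2: Muon VO       3: Muon QKVO
#   4: Muon MLP        5: all Adam       6: Muon W2       7: Muon VO+MLP
#   8: Muon VO+W2      9: SGD+momentum on everything      10: Muon O+MLP
#   13: Muon O+W2      14: Muon O        15: Muon V       16: Muon QKV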
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
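# In this gated branch each of the 10 blocks carries three MLP matrices
# (c_fc, c_up, c_proj), so mlp_w1_group holds 20 tensors (c_fc + c_up),
# mlp_w2_group holds 10 (c_proj), and all_mlp_matrices holds 30; the
# attention grouping is identical to the qkvo branch.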
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: # Muon on W_2 MLP, W_O Attn + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # use exp_args.muon_lr directly; this branch never binds a local muon_lr + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
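# get_lr worked example with num_iterations = 10000 and cooldown_frac = 0.8:
# the multiplier stays at 1.0 for the first 20% of training, then decays
# linearly to 0.1; e.g. at step 6000, x = 0.6 and w = (1 - 0.6) / 0.8 = 0.5,
# giving 0.5 * 1.0 + 0.5 * 0.1 = 0.55.
assert get_lr(0) == 1.0 and get_lr(1999) == 1.0
assert abs(get_lr(6000) - 0.55) < 1e-6
assert abs(get_lr(10000) - 0.1) < 1e-6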
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
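# Shape of the fixed_eval_indices.json produced above: a map from group-id
# string to at most PER_GROUP_K (default 100) line numbers into the QA jsonl,
# e.g. {"0": [12, 873, ...], "1": [...], ...}. Re-runs reload the same file,
# so every evaluation scores the exact same items.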
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
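# Validation accounting with the logged hyperparameters: val_tokens = 491520
# and val_seq_len = 16384, so with world_size = 2 each eval batch covers
# 2 * 16384 = 32768 tokens and val_num_steps = 491520 // 32768 = 15; with 8
# GPUs the batch is 131072 tokens and 491520 / 131072 = 3.75, which trips the
# divisibility warning above and silently drops the remainder.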
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / step if step > 0 else 0
+
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            # Recomputes the same class -> group mapping that was built globally above.
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                # num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL,  # superseded by the fixed eval set
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # unweighted (simple) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
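+        # ------------------------------------------------------------------
+        # Illustrative sketch (editorial addition, not executed by this run):
+        # the two schedules applied in the training section below. get_lr is
+        # defined earlier in the script and is not shown in this excerpt, so
+        # demo_lr_mult is a stand-in; the momentum ramp mirrors the
+        # optimizer2 update exactly.
+        def demo_scaled_lr(initial_lr: float, demo_lr_mult: float) -> float:
+            # group["lr"] is always recomputed from the stored initial_lr,
+            # so the schedule stays stateless across steps.
+            return initial_lr * demo_lr_mult
+
+        def demo_momentum(demo_step: int) -> float:
+            # Linear ramp from 0.85 to 0.95 over the first 300 steps.
+            demo_frac = min(demo_step / 300, 1)
+            return (1 - demo_frac) * 0.85 + demo_frac * 0.95
+
+        assert demo_momentum(0) == 0.85 and demo_momentum(300) == 0.95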
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # (A commented-out variant here also saved a final checkpoint on
+    # last_step, to checkpoints/state_step{step:06d}.pt, and then broke out
+    # of the loop; it is superseded by the interval checkpointing above.)
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Gradient clipping for the SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Per-token loss (currently computed but not included in the log line below).
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 22:51:01] [Rank 0] PRINT: Constructing model...
+[2025-09-05 22:51:03] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 22:51:03] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 22:51:03] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 22:51:07] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 22:51:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 22:51:07] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 22:51:07] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 22:51:07] [Rank 0] PRINT: Model returns:
+[2025-09-05 22:51:07] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 22:51:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 22:51:07] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.1).
+[2025-09-05 22:51:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 22:51:07] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 22:51:12] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 22:51:12] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 22:51:50] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 22:51:50] [Rank 0] PRINT: Starting training...
+[2025-09-05 22:51:57] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/fixed_eval_indices.json
+[2025-09-05 22:51:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:52:00] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 22:52:33] [Rank 0] step:21/10000 train_time:32812ms step_avg:1562.49ms
+[2025-09-05 22:52:34] [Rank 0] step:41/10000 train_time:33540ms step_avg:818.06ms
+[2025-09-05 22:52:34] [Rank 0] step:61/10000 train_time:34268ms step_avg:561.76ms
+[2025-09-05 22:52:35] [Rank 0] step:81/10000 train_time:34994ms step_avg:432.03ms
+[2025-09-05 22:52:36] [Rank 0] step:101/10000 train_time:35722ms step_avg:353.69ms
+[2025-09-05 22:52:37] [Rank 0] step:121/10000 train_time:36449ms step_avg:301.23ms
+[2025-09-05 22:52:37] [Rank 0] step:141/10000 train_time:37177ms step_avg:263.67ms
+[2025-09-05 22:52:38] [Rank 0] step:161/10000 train_time:37906ms step_avg:235.44ms
+[2025-09-05 22:52:39] [Rank 0] step:181/10000 train_time:38634ms step_avg:213.45ms
+[2025-09-05 22:52:40] [Rank 0] step:201/10000 train_time:39361ms step_avg:195.83ms
+[2025-09-05 22:52:40] [Rank 0] step:221/10000 train_time:40088ms step_avg:181.39ms
+[2025-09-05 22:52:41] [Rank 0] step:241/10000 train_time:40815ms step_avg:169.36ms
+[2025-09-05 22:52:42] [Rank 0] step:261/10000 train_time:41548ms step_avg:159.19ms
+[2025-09-05 22:52:42] [Rank 0] step:281/10000 train_time:42283ms step_avg:150.47ms
+[2025-09-05 22:52:43] [Rank 0] step:301/10000 train_time:43012ms step_avg:142.90ms
+[2025-09-05 22:52:44] [Rank 0] step:321/10000 train_time:43738ms step_avg:136.26ms
+[2025-09-05 22:52:45] [Rank 0] step:341/10000 train_time:44466ms step_avg:130.40ms
+[2025-09-05 22:52:45] [Rank 0] step:361/10000 train_time:45192ms step_avg:125.19ms
+[2025-09-05 22:52:46] [Rank 0] step:381/10000 train_time:45920ms step_avg:120.52ms
+[2025-09-05 22:52:47] [Rank 0] step:401/10000 train_time:46647ms step_avg:116.33ms
+[2025-09-05 22:52:48] [Rank 0] step:421/10000 train_time:47375ms step_avg:112.53ms
+[2025-09-05 22:52:48] [Rank 0] step:441/10000 train_time:48103ms step_avg:109.08ms
+[2025-09-05 22:52:49] [Rank 0] step:461/10000 train_time:48830ms step_avg:105.92ms
+[2025-09-05 22:52:50] [Rank 0] step:481/10000 train_time:49557ms step_avg:103.03ms
+[2025-09-05 22:52:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:52:51] [Rank 0] PRINT: step:500/10000 train_loss:5.6469 val_loss:4.0587 train_time:50364ms step_avg:100.73ms
+[2025-09-05 22:52:51] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:52:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:54:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:54:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:54:12] [Rank 0] Total Loss: 6.0165
+[2025-09-05 22:54:12] [Rank 0] Total FTA (Unweighted): 0.0819
+[2025-09-05 22:54:12] [Rank 0] Total FTA (Weighted): 0.0819
+[2025-09-05 22:54:12] [Rank 0] Group 0 Loss: 3.7818
+[2025-09-05 22:54:12] [Rank 0] Group 1 Loss: 3.7675
+[2025-09-05 22:54:12] [Rank 0] Group 2 Loss: 4.5293
+[2025-09-05 22:54:12] [Rank 0] Group 3 Loss: 5.4331
+[2025-09-05 22:54:12] [Rank 0] Group 4 Loss: 6.2112
+[2025-09-05 22:54:12] [Rank 0] Group 5 Loss: 6.3615
+[2025-09-05 22:54:12] [Rank 0] Group 6 Loss: 6.4548
+[2025-09-05 22:54:12] [Rank 0] Group 7 Loss: 6.4600
+[2025-09-05 22:54:12] [Rank 0] Group 8 Loss: 6.6017
+[2025-09-05 22:54:12] [Rank 0] Group 9 Loss: 6.7156
+[2025-09-05 22:54:12] [Rank 0] Group 10 Loss: 6.7076
+[2025-09-05 22:54:12] [Rank 0] Group 11 Loss: 6.7895
+[2025-09-05 22:54:12] [Rank 0] Group 12 Loss: 6.5728
+[2025-09-05 22:54:12] [Rank 0] Group 13 Loss: 6.5750
+[2025-09-05 22:54:12] [Rank 0] Group 14 Loss: 6.6911
+[2025-09-05 22:54:12] [Rank 0] Group 15 Loss: 6.6106
+[2025-09-05 22:54:12] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 22:54:12] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 22:54:12] [Rank 0] Group 2 FTA: 0.0700
+[2025-09-05 22:54:12] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-05 22:54:12] [Rank 0] Group 4 FTA: 0.0300
+[2025-09-05 22:54:12] [Rank 0] Group 5 FTA: 0.0600
+[2025-09-05 22:54:12] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 22:54:12] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-05 22:54:12] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-05 22:54:12] [Rank 0] Group 9 FTA: 0.0700
+[2025-09-05 22:54:12] [Rank 0] Group 10 FTA: 0.0600
+[2025-09-05 22:54:12] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 22:54:12] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 22:54:12] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 22:54:12] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 22:54:12] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 22:54:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 22:54:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 22:54:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 22:54:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 22:54:14] [Rank 0] step:501/10000 train_time:50374ms step_avg:100.55ms
+[2025-09-05 22:54:15] [Rank 0] step:521/10000 train_time:51038ms step_avg:97.96ms
+[2025-09-05 22:54:15] [Rank 0] step:541/10000 train_time:51765ms step_avg:95.68ms
+[2025-09-05 22:54:16] [Rank 0] step:561/10000 train_time:52491ms step_avg:93.57ms
+[2025-09-05 22:54:17] [Rank 0] step:581/10000 train_time:53216ms step_avg:91.59ms
+[2025-09-05 22:54:18] [Rank 0] step:601/10000 train_time:53943ms step_avg:89.76ms
+[2025-09-05 22:54:18] [Rank 0] step:621/10000 train_time:54671ms step_avg:88.04ms
+[2025-09-05 22:54:19] [Rank 0] step:641/10000 train_time:55396ms step_avg:86.42ms
+[2025-09-05 22:54:20] [Rank 0] step:661/10000 train_time:56123ms step_avg:84.91ms
+[2025-09-05 22:54:20] [Rank 0] step:681/10000 train_time:56850ms step_avg:83.48ms
+[2025-09-05 22:54:21] [Rank 0] step:701/10000 train_time:57576ms step_avg:82.13ms
+[2025-09-05 22:54:22] [Rank 0] step:721/10000 train_time:58303ms step_avg:80.86ms
+[2025-09-05 22:54:23] [Rank 0] step:741/10000 train_time:59029ms step_avg:79.66ms
+[2025-09-05 22:54:23] [Rank 0] step:761/10000 train_time:59759ms step_avg:78.53ms
+[2025-09-05 22:54:24] [Rank 0] step:781/10000 train_time:60491ms step_avg:77.45ms
+[2025-09-05 22:54:25] [Rank 0] step:801/10000 train_time:61224ms step_avg:76.43ms
+[2025-09-05 22:54:26] [Rank 0] step:821/10000 train_time:62570ms step_avg:76.21ms
+[2025-09-05 22:54:27] [Rank 0] step:841/10000 train_time:63303ms step_avg:75.27ms
+[2025-09-05 22:54:28] [Rank 0] step:861/10000 train_time:64036ms step_avg:74.37ms
+[2025-09-05 22:54:28] [Rank 0] step:881/10000 train_time:64768ms step_avg:73.52ms
+[2025-09-05 22:54:29] [Rank 0] step:901/10000 train_time:65500ms step_avg:72.70ms
+[2025-09-05 22:54:30] [Rank 0] step:921/10000 train_time:66232ms step_avg:71.91ms
+[2025-09-05 22:54:31] [Rank 0] step:941/10000 train_time:66964ms step_avg:71.16ms
+[2025-09-05 22:54:31] [Rank 0] step:961/10000 train_time:67698ms step_avg:70.44ms
+[2025-09-05 22:54:32] [Rank 0] step:981/10000 train_time:68430ms step_avg:69.76ms
+[2025-09-05 22:54:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:54:33] [Rank 0] PRINT: step:1000/10000 train_loss:3.6543 val_loss:3.3272 train_time:69243ms step_avg:69.24ms
+[2025-09-05 22:54:33] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:54:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:55:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:55:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:55:54] [Rank 0] Total Loss: 5.4956
+[2025-09-05 22:55:54] [Rank 0] Total FTA (Unweighted): 0.1288
+[2025-09-05 22:55:54] [Rank 0] Total FTA (Weighted): 0.1288
+[2025-09-05 22:55:54] [Rank 0] Group 0 Loss: 3.4998
+[2025-09-05 22:55:54] [Rank 0] Group 1 Loss: 3.2909
+[2025-09-05 22:55:54] [Rank 0] Group 2 Loss: 3.7247
+[2025-09-05 22:55:54] [Rank 0] Group 3 Loss: 4.4519
+[2025-09-05 22:55:54] [Rank 0] Group 4 Loss: 5.3854
+[2025-09-05 22:55:54] [Rank 0] Group 5 Loss: 5.6911
+[2025-09-05 22:55:54] [Rank 0] Group 6 Loss: 5.9134
+[2025-09-05 22:55:54] [Rank 0] Group 7 Loss: 5.9622
+[2025-09-05 22:55:54] [Rank 0] Group 8 Loss: 6.1676
+[2025-09-05 22:55:54] [Rank 0] Group 9 Loss: 6.3328
+[2025-09-05 22:55:54] [Rank 0] Group 10 Loss: 6.2807
+[2025-09-05 22:55:54] [Rank 0] Group 11 Loss: 6.3544
+[2025-09-05 22:55:54] [Rank 0] Group 12 Loss: 6.1808
+[2025-09-05 22:55:54] [Rank 0] Group 13 Loss: 6.2029
+[2025-09-05 22:55:54] [Rank 0] Group 14 Loss: 6.2780
+[2025-09-05 22:55:54] [Rank 0] Group 15 Loss: 6.2123
+[2025-09-05 22:55:54] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 22:55:54] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 22:55:54] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 22:55:54] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 22:55:54] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 22:55:54] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-05 22:55:54] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 22:55:54] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 22:55:54] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 22:55:54] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 22:55:54] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 22:55:54] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 22:55:54] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 22:55:54] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 22:55:54] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 22:55:54] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 22:55:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 22:55:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 22:55:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 22:55:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 22:55:56] [Rank 0] step:1001/10000 train_time:69252ms step_avg:69.18ms
+[2025-09-05 22:55:57] [Rank 0] step:1021/10000 train_time:69926ms step_avg:68.49ms
+[2025-09-05 22:55:57] [Rank 0] step:1041/10000 train_time:70658ms step_avg:67.88ms
+[2025-09-05 22:55:58] [Rank 0] step:1061/10000 train_time:71392ms step_avg:67.29ms
+[2025-09-05 22:55:59] [Rank 0] step:1081/10000 train_time:72124ms step_avg:66.72ms
+[2025-09-05 22:55:59] [Rank 0] step:1101/10000 train_time:72855ms step_avg:66.17ms
+[2025-09-05 22:56:00] [Rank 0] step:1121/10000 train_time:73587ms step_avg:65.64ms
+[2025-09-05 22:56:01] [Rank 0] step:1141/10000 train_time:74320ms step_avg:65.14ms
+[2025-09-05 22:56:02] [Rank 0] step:1161/10000 train_time:75051ms step_avg:64.64ms
+[2025-09-05 22:56:02] [Rank 0] step:1181/10000 train_time:75784ms step_avg:64.17ms
+[2025-09-05 22:56:03] [Rank 0] step:1201/10000 train_time:76518ms step_avg:63.71ms
+[2025-09-05 22:56:04] [Rank 0] step:1221/10000 train_time:77249ms step_avg:63.27ms
+[2025-09-05 22:56:05] [Rank 0] step:1241/10000 train_time:77982ms step_avg:62.84ms
+[2025-09-05 22:56:05] [Rank 0] step:1261/10000 train_time:78715ms step_avg:62.42ms
+[2025-09-05 22:56:06] [Rank 0] step:1281/10000 train_time:79448ms step_avg:62.02ms
+[2025-09-05 22:56:07] [Rank 0] step:1301/10000 train_time:80180ms step_avg:61.63ms
+[2025-09-05 22:56:08] [Rank 0] step:1321/10000 train_time:80912ms step_avg:61.25ms
+[2025-09-05 22:56:08] [Rank 0] step:1341/10000 train_time:81643ms step_avg:60.88ms
+[2025-09-05 22:56:09] [Rank 0] step:1361/10000 train_time:82373ms step_avg:60.52ms
+[2025-09-05 22:56:10] [Rank 0] step:1381/10000 train_time:83103ms step_avg:60.18ms
+[2025-09-05 22:56:10] [Rank 0] step:1401/10000 train_time:83836ms step_avg:59.84ms
+[2025-09-05 22:56:11] [Rank 0] step:1421/10000 train_time:84567ms step_avg:59.51ms
+[2025-09-05 22:56:12] [Rank 0] step:1441/10000 train_time:85299ms step_avg:59.19ms
+[2025-09-05 22:56:13] [Rank 0] step:1461/10000 train_time:86031ms step_avg:58.89ms
+[2025-09-05 22:56:13] [Rank 0] step:1481/10000 train_time:86764ms step_avg:58.58ms
+[2025-09-05 22:56:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:56:15] [Rank 0] PRINT: step:1500/10000 train_loss:3.1346 val_loss:2.9619 train_time:87576ms step_avg:58.38ms
+[2025-09-05 22:56:15] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:56:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:57:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:57:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:57:35] [Rank 0] Total Loss: 5.2157
+[2025-09-05 22:57:35] [Rank 0] Total FTA (Unweighted): 0.1500
+[2025-09-05 22:57:35] [Rank 0] Total FTA (Weighted): 0.1500
+[2025-09-05 22:57:35] [Rank 0] Group 0 Loss: 3.3276
+[2025-09-05 22:57:35] [Rank 0] Group 1 Loss: 3.3072
+[2025-09-05 22:57:35] [Rank 0] Group 2 Loss: 3.4481
+[2025-09-05 22:57:35] [Rank 0] Group 3 Loss: 4.0129
+[2025-09-05 22:57:35] [Rank 0] Group 4 Loss: 4.8372
+[2025-09-05 22:57:35] [Rank 0] Group 5 Loss: 5.3039
+[2025-09-05 22:57:35] [Rank 0] Group 6 Loss: 5.5855
+[2025-09-05 22:57:35] [Rank 0] Group 7 Loss: 5.6602
+[2025-09-05 22:57:35] [Rank 0] Group 8 Loss: 5.8859
+[2025-09-05 22:57:35] [Rank 0] Group 9 Loss: 6.0249
+[2025-09-05 22:57:35] [Rank 0] Group 10 Loss: 6.0454
+[2025-09-05 22:57:35] [Rank 0] Group 11 Loss: 6.0830
+[2025-09-05 22:57:35] [Rank 0] Group 12 Loss: 5.9470
+[2025-09-05 22:57:35] [Rank 0] Group 13 Loss: 5.9583
+[2025-09-05 22:57:35] [Rank 0] Group 14 Loss: 6.0552
+[2025-09-05 22:57:35] [Rank 0] Group 15 Loss: 5.9690
+[2025-09-05 22:57:35] [Rank 0] Group 0 FTA: 0.6200
+[2025-09-05 22:57:35] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 22:57:35] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 22:57:35] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 22:57:35] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 22:57:35] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 22:57:35] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 22:57:35] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 22:57:35] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 22:57:35] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 22:57:35] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 22:57:35] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 22:57:35] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 22:57:35] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 22:57:35] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 22:57:35] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 22:57:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 22:57:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 22:57:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 22:57:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 22:57:37] [Rank 0] step:1501/10000 train_time:87585ms step_avg:58.35ms
+[2025-09-05 22:57:37] [Rank 0] step:1521/10000 train_time:88253ms step_avg:58.02ms
+[2025-09-05 22:57:38] [Rank 0] step:1541/10000 train_time:88985ms step_avg:57.74ms
+[2025-09-05 22:57:39] [Rank 0] step:1561/10000 train_time:89717ms step_avg:57.47ms
+[2025-09-05 22:57:40] [Rank 0] step:1581/10000 train_time:90448ms step_avg:57.21ms
+[2025-09-05 22:57:40] [Rank 0] step:1601/10000 train_time:91180ms step_avg:56.95ms
+[2025-09-05 22:57:41] [Rank 0] step:1621/10000 train_time:91911ms step_avg:56.70ms
+[2025-09-05 22:57:43] [Rank 0] step:1641/10000 train_time:93273ms step_avg:56.84ms
+[2025-09-05 22:57:43] [Rank 0] step:1661/10000 train_time:94005ms step_avg:56.60ms
+[2025-09-05 22:57:44] [Rank 0] step:1681/10000 train_time:94872ms step_avg:56.44ms
+[2025-09-05 22:57:45] [Rank 0] step:1701/10000 train_time:95604ms step_avg:56.20ms
+[2025-09-05 22:57:46] [Rank 0] step:1721/10000 train_time:96337ms step_avg:55.98ms
+[2025-09-05 22:57:46] [Rank 0] step:1741/10000 train_time:97216ms step_avg:55.84ms
+[2025-09-05 22:57:47] [Rank 0] step:1761/10000 train_time:97949ms step_avg:55.62ms
+[2025-09-05 22:57:48] [Rank 0] step:1781/10000 train_time:98681ms step_avg:55.41ms
+[2025-09-05 22:57:49] [Rank 0] step:1801/10000 train_time:99415ms step_avg:55.20ms
+[2025-09-05 22:57:49] [Rank 0] step:1821/10000 train_time:100147ms step_avg:55.00ms
+[2025-09-05 22:57:50] [Rank 0] step:1841/10000 train_time:100880ms step_avg:54.80ms
+[2025-09-05 22:57:51] [Rank 0] step:1861/10000 train_time:101612ms step_avg:54.60ms
+[2025-09-05 22:57:52] [Rank 0] step:1881/10000 train_time:102346ms step_avg:54.41ms
+[2025-09-05 22:57:52] [Rank 0] step:1901/10000 train_time:103078ms step_avg:54.22ms
+[2025-09-05 22:57:53] [Rank 0] step:1921/10000 train_time:103811ms step_avg:54.04ms
+[2025-09-05 22:57:54] [Rank 0] step:1941/10000 train_time:104543ms step_avg:53.86ms
+[2025-09-05 22:57:55] [Rank 0] step:1961/10000 train_time:105275ms step_avg:53.68ms
+[2025-09-05 22:57:55] [Rank 0] step:1981/10000 train_time:106008ms step_avg:53.51ms
+[2025-09-05 22:57:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:57:56] [Rank 0] PRINT: step:2000/10000 train_loss:2.8504 val_loss:2.7299 train_time:106820ms step_avg:53.41ms
+[2025-09-05 22:57:56] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:57:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 22:59:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 22:59:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 22:59:18] [Rank 0] Total Loss: 5.0370
+[2025-09-05 22:59:18] [Rank 0] Total FTA (Unweighted): 0.1781
+[2025-09-05 22:59:18] [Rank 0] Total FTA (Weighted): 0.1781
+[2025-09-05 22:59:18] [Rank 0] Group 0 Loss: 3.2796
+[2025-09-05 22:59:18] [Rank 0] Group 1 Loss: 3.1873
+[2025-09-05 22:59:18] [Rank 0] Group 2 Loss: 3.2981
+[2025-09-05 22:59:18] [Rank 0] Group 3 Loss: 3.8530
+[2025-09-05 22:59:18] [Rank 0] Group 4 Loss: 4.5088
+[2025-09-05 22:59:18] [Rank 0] Group 5 Loss: 5.0412
+[2025-09-05 22:59:18] [Rank 0] Group 6 Loss: 5.3160
+[2025-09-05 22:59:18] [Rank 0] Group 7 Loss: 5.4508
+[2025-09-05 22:59:18] [Rank 0] Group 8 Loss: 5.7029
+[2025-09-05 22:59:18] [Rank 0] Group 9 Loss: 5.8389
+[2025-09-05 22:59:18] [Rank 0] Group 10 Loss: 5.8797
+[2025-09-05 22:59:18] [Rank 0] Group 11 Loss: 5.9140
+[2025-09-05 22:59:18] [Rank 0] Group 12 Loss: 5.8063
+[2025-09-05 22:59:18] [Rank 0] Group 13 Loss: 5.8099
+[2025-09-05 22:59:18] [Rank 0] Group 14 Loss: 5.8977
+[2025-09-05 22:59:18] [Rank 0] Group 15 Loss: 5.8080
+[2025-09-05 22:59:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 22:59:18] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 22:59:18] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 22:59:18] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 22:59:18] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 22:59:18] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 22:59:18] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-05 22:59:18] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 22:59:18] [Rank 0] Group 8 FTA: 0.1600
+[2025-09-05 22:59:18] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 22:59:18] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 22:59:18] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 22:59:18] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 22:59:18] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 22:59:18] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 22:59:18] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 22:59:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 22:59:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 22:59:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 22:59:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 22:59:19] [Rank 0] step:2001/10000 train_time:106829ms step_avg:53.39ms
+[2025-09-05 22:59:20] [Rank 0] step:2021/10000 train_time:107697ms step_avg:53.29ms
+[2025-09-05 22:59:21] [Rank 0] step:2041/10000 train_time:108429ms step_avg:53.13ms
+[2025-09-05 22:59:22] [Rank 0] step:2061/10000 train_time:109160ms step_avg:52.96ms
+[2025-09-05 22:59:22] [Rank 0] step:2081/10000 train_time:109890ms step_avg:52.81ms
+[2025-09-05 22:59:23] [Rank 0] step:2101/10000 train_time:110622ms step_avg:52.65ms
+[2025-09-05 22:59:24] [Rank 0] step:2121/10000 train_time:111354ms step_avg:52.50ms
+[2025-09-05 22:59:25] [Rank 0] step:2141/10000 train_time:112087ms step_avg:52.35ms
+[2025-09-05 22:59:25] [Rank 0] step:2161/10000 train_time:112818ms step_avg:52.21ms
+[2025-09-05 22:59:26] [Rank 0] step:2181/10000 train_time:113550ms step_avg:52.06ms
+[2025-09-05 22:59:27] [Rank 0] step:2201/10000 train_time:114282ms step_avg:51.92ms
+[2025-09-05 22:59:27] [Rank 0] step:2221/10000 train_time:115014ms step_avg:51.78ms
+[2025-09-05 22:59:28] [Rank 0] step:2241/10000 train_time:115750ms step_avg:51.65ms
+[2025-09-05 22:59:29] [Rank 0] step:2261/10000 train_time:116489ms step_avg:51.52ms
+[2025-09-05 22:59:30] [Rank 0] step:2281/10000 train_time:117227ms step_avg:51.39ms
+[2025-09-05 22:59:30] [Rank 0] step:2301/10000 train_time:117965ms step_avg:51.27ms
+[2025-09-05 22:59:31] [Rank 0] step:2321/10000 train_time:118703ms step_avg:51.14ms
+[2025-09-05 22:59:32] [Rank 0] step:2341/10000 train_time:119442ms step_avg:51.02ms
+[2025-09-05 22:59:33] [Rank 0] step:2361/10000 train_time:120180ms step_avg:50.90ms
+[2025-09-05 22:59:33] [Rank 0] step:2381/10000 train_time:120919ms step_avg:50.78ms
+[2025-09-05 22:59:34] [Rank 0] step:2401/10000 train_time:121658ms step_avg:50.67ms
+[2025-09-05 22:59:35] [Rank 0] step:2421/10000 train_time:122396ms step_avg:50.56ms
+[2025-09-05 22:59:36] [Rank 0] step:2441/10000 train_time:123135ms step_avg:50.44ms
+[2025-09-05 22:59:36] [Rank 0] step:2461/10000 train_time:123874ms step_avg:50.33ms
+[2025-09-05 22:59:37] [Rank 0] step:2481/10000 train_time:124614ms step_avg:50.23ms
+[2025-09-05 22:59:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 22:59:38] [Rank 0] PRINT: step:2500/10000 train_loss:2.6539 val_loss:2.5594 train_time:125434ms step_avg:50.17ms
+[2025-09-05 22:59:38] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 22:59:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:01:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:01:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:01:00] [Rank 0] Total Loss: 4.9109
+[2025-09-05 23:01:00] [Rank 0] Total FTA (Unweighted): 0.2081
+[2025-09-05 23:01:00] [Rank 0] Total FTA (Weighted): 0.2081
+[2025-09-05 23:01:00] [Rank 0] Group 0 Loss: 3.3395
+[2025-09-05 23:01:00] [Rank 0] Group 1 Loss: 3.1603
+[2025-09-05 23:01:00] [Rank 0] Group 2 Loss: 3.2594
+[2025-09-05 23:01:00] [Rank 0] Group 3 Loss: 3.7634
+[2025-09-05 23:01:00] [Rank 0] Group 4 Loss: 4.3195
+[2025-09-05 23:01:00] [Rank 0] Group 5 Loss: 4.8264
+[2025-09-05 23:01:00] [Rank 0] Group 6 Loss: 5.1401
+[2025-09-05 23:01:00] [Rank 0] Group 7 Loss: 5.2824
+[2025-09-05 23:01:00] [Rank 0] Group 8 Loss: 5.5463
+[2025-09-05 23:01:00] [Rank 0] Group 9 Loss: 5.6772
+[2025-09-05 23:01:00] [Rank 0] Group 10 Loss: 5.7109
+[2025-09-05 23:01:00] [Rank 0] Group 11 Loss: 5.7583
+[2025-09-05 23:01:00] [Rank 0] Group 12 Loss: 5.6576
+[2025-09-05 23:01:00] [Rank 0] Group 13 Loss: 5.6929
+[2025-09-05 23:01:00] [Rank 0] Group 14 Loss: 5.7644
+[2025-09-05 23:01:00] [Rank 0] Group 15 Loss: 5.6759
+[2025-09-05 23:01:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:01:00] [Rank 0] Group 1 FTA: 0.5200
+[2025-09-05 23:01:00] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 23:01:00] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:01:00] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-05 23:01:00] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 23:01:00] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 23:01:00] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-05 23:01:00] [Rank 0] Group 8 FTA: 0.1900
+[2025-09-05 23:01:00] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 23:01:00] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 23:01:00] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 23:01:00] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 23:01:00] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 23:01:00] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 23:01:00] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 23:01:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:01:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:01:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:01:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:01:02] [Rank 0] step:2501/10000 train_time:125443ms step_avg:50.16ms
+[2025-09-05 23:01:02] [Rank 0] step:2521/10000 train_time:126113ms step_avg:50.03ms
+[2025-09-05 23:01:03] [Rank 0] step:2541/10000 train_time:126850ms step_avg:49.92ms
+[2025-09-05 23:01:04] [Rank 0] step:2561/10000 train_time:127588ms step_avg:49.82ms
+[2025-09-05 23:01:05] [Rank 0] step:2581/10000 train_time:128327ms step_avg:49.72ms
+[2025-09-05 23:01:05] [Rank 0] step:2601/10000 train_time:129065ms step_avg:49.62ms
+[2025-09-05 23:01:06] [Rank 0] step:2621/10000 train_time:129803ms step_avg:49.52ms
+[2025-09-05 23:01:07] [Rank 0] step:2641/10000 train_time:130542ms step_avg:49.43ms
+[2025-09-05 23:01:08] [Rank 0] step:2661/10000 train_time:131280ms step_avg:49.33ms
+[2025-09-05 23:01:08] [Rank 0] step:2681/10000 train_time:132017ms step_avg:49.24ms
+[2025-09-05 23:01:09] [Rank 0] step:2701/10000 train_time:132756ms step_avg:49.15ms
+[2025-09-05 23:01:10] [Rank 0] step:2721/10000 train_time:133494ms step_avg:49.06ms
+[2025-09-05 23:01:10] [Rank 0] step:2741/10000 train_time:134232ms step_avg:48.97ms
+[2025-09-05 23:01:11] [Rank 0] step:2761/10000 train_time:134971ms step_avg:48.88ms
+[2025-09-05 23:01:12] [Rank 0] step:2781/10000 train_time:135709ms step_avg:48.80ms
+[2025-09-05 23:01:13] [Rank 0] step:2801/10000 train_time:136445ms step_avg:48.71ms
+[2025-09-05 23:01:14] [Rank 0] step:2821/10000 train_time:137808ms step_avg:48.85ms
+[2025-09-05 23:01:15] [Rank 0] step:2841/10000 train_time:138557ms step_avg:48.77ms
+[2025-09-05 23:01:16] [Rank 0] step:2861/10000 train_time:139294ms step_avg:48.69ms
+[2025-09-05 23:01:16] [Rank 0] step:2881/10000 train_time:140033ms step_avg:48.61ms
+[2025-09-05 23:01:17] [Rank 0] step:2901/10000 train_time:140772ms step_avg:48.53ms
+[2025-09-05 23:01:18] [Rank 0] step:2921/10000 train_time:141510ms step_avg:48.45ms
+[2025-09-05 23:01:18] [Rank 0] step:2941/10000 train_time:142249ms step_avg:48.37ms
+[2025-09-05 23:01:19] [Rank 0] step:2961/10000 train_time:142987ms step_avg:48.29ms
+[2025-09-05 23:01:20] [Rank 0] step:2981/10000 train_time:143725ms step_avg:48.21ms
+[2025-09-05 23:01:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:01:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:01:21] [Rank 0] PRINT: step:3000/10000 train_loss:2.5036 val_loss:2.4411 train_time:144544ms step_avg:48.18ms +[2025-09-05 23:01:21] [Rank 0] PRINT: step:3000/10000 train_loss:2.5036 val_loss:2.4411 train_time:144544ms step_avg:48.18ms +[2025-09-05 23:01:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:01:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:01:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:01:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:02:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:02:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:02:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:02:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:02:43] [Rank 0] Total Loss: 4.8365 +[2025-09-05 23:02:43] [Rank 0] Total Loss: 4.8365 +[2025-09-05 23:02:43] [Rank 0] Total FTA (Unweighted): 0.2519 +[2025-09-05 23:02:43] [Rank 0] Total FTA (Unweighted): 0.2519 +[2025-09-05 23:02:43] [Rank 0] Total FTA (Weighted): 0.2519 +[2025-09-05 23:02:43] [Rank 0] Total FTA (Weighted): 0.2519 +[2025-09-05 23:02:43] [Rank 0] Group 0 Loss: 3.3326 +[2025-09-05 23:02:43] [Rank 0] Group 0 Loss: 3.3326 +[2025-09-05 23:02:43] [Rank 0] Group 1 Loss: 3.1215 +[2025-09-05 23:02:43] [Rank 0] Group 1 Loss: 3.1215 +[2025-09-05 23:02:43] [Rank 0] Group 2 Loss: 3.2713 +[2025-09-05 23:02:43] [Rank 0] Group 2 Loss: 3.2713 +[2025-09-05 23:02:43] [Rank 0] Group 3 Loss: 3.7901 +[2025-09-05 23:02:43] [Rank 0] Group 3 Loss: 3.7901 +[2025-09-05 23:02:43] [Rank 0] Group 4 Loss: 4.2196 +[2025-09-05 23:02:43] [Rank 0] Group 4 Loss: 4.2196 +[2025-09-05 23:02:43] [Rank 0] Group 5 Loss: 4.6927 +[2025-09-05 23:02:43] [Rank 0] Group 5 Loss: 4.6927 +[2025-09-05 23:02:43] [Rank 0] Group 6 Loss: 5.0388 +[2025-09-05 23:02:43] [Rank 0] Group 6 Loss: 5.0388 +[2025-09-05 23:02:43] [Rank 0] Group 7 Loss: 5.1850 +[2025-09-05 23:02:43] [Rank 0] Group 7 Loss: 5.1850 +[2025-09-05 23:02:43] [Rank 0] Group 8 Loss: 5.4503 +[2025-09-05 23:02:43] [Rank 0] Group 8 Loss: 5.4503 +[2025-09-05 23:02:43] [Rank 0] Group 9 Loss: 5.5813 +[2025-09-05 23:02:43] [Rank 0] Group 9 Loss: 5.5813 +[2025-09-05 23:02:43] [Rank 0] Group 10 Loss: 5.6061 +[2025-09-05 23:02:43] [Rank 0] Group 10 Loss: 5.6061 +[2025-09-05 23:02:43] [Rank 0] Group 11 Loss: 5.6565 +[2025-09-05 23:02:43] [Rank 0] Group 11 Loss: 5.6565 +[2025-09-05 23:02:43] [Rank 0] Group 12 Loss: 5.5761 +[2025-09-05 23:02:43] [Rank 0] Group 12 Loss: 5.5761 +[2025-09-05 23:02:43] [Rank 0] Group 13 Loss: 5.6104 +[2025-09-05 23:02:43] [Rank 0] Group 13 Loss: 5.6104 +[2025-09-05 23:02:43] [Rank 0] Group 14 Loss: 5.6640 +[2025-09-05 23:02:43] [Rank 0] Group 14 Loss: 5.6640 +[2025-09-05 23:02:43] [Rank 0] Group 15 Loss: 5.5878 +[2025-09-05 23:02:43] [Rank 0] Group 15 Loss: 5.5878 +[2025-09-05 23:02:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:02:43] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:02:43] [Rank 0] Group 1 FTA: 0.8700 +[2025-09-05 23:02:43] [Rank 0] Group 1 FTA: 0.8700 +[2025-09-05 23:02:43] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:02:43] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:02:43] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:02:43] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:02:43] [Rank 0] Group 4 FTA: 0.1700 
+[2025-09-05 23:02:43] [Rank 0] Group 4 FTA: 0.1700 +[2025-09-05 23:02:43] [Rank 0] Group 5 FTA: 0.2100 +[2025-09-05 23:02:43] [Rank 0] Group 5 FTA: 0.2100 +[2025-09-05 23:02:43] [Rank 0] Group 6 FTA: 0.1600 +[2025-09-05 23:02:43] [Rank 0] Group 6 FTA: 0.1600 +[2025-09-05 23:02:43] [Rank 0] Group 7 FTA: 0.1100 +[2025-09-05 23:02:43] [Rank 0] Group 7 FTA: 0.1100 +[2025-09-05 23:02:43] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:02:43] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:02:43] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-05 23:02:43] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-05 23:02:43] [Rank 0] Group 10 FTA: 0.1600 +[2025-09-05 23:02:43] [Rank 0] Group 10 FTA: 0.1600 +[2025-09-05 23:02:43] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-05 23:02:43] [Rank 0] Group 11 FTA: 0.1200 +[2025-09-05 23:02:43] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 23:02:43] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 23:02:43] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 23:02:43] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 23:02:43] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 23:02:43] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 23:02:43] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 23:02:43] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 23:02:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:02:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:02:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:02:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:02:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:02:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:02:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:02:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:02:44] [Rank 0] step:3001/10000 train_time:144554ms step_avg:48.17ms +[2025-09-05 23:02:44] [Rank 0] step:3001/10000 train_time:144554ms step_avg:48.17ms +[2025-09-05 23:02:45] [Rank 0] step:3021/10000 train_time:145238ms step_avg:48.08ms +[2025-09-05 23:02:45] [Rank 0] step:3021/10000 train_time:145238ms step_avg:48.08ms +[2025-09-05 23:02:46] [Rank 0] step:3041/10000 train_time:145975ms step_avg:48.00ms +[2025-09-05 23:02:46] [Rank 0] step:3041/10000 train_time:145975ms step_avg:48.00ms +[2025-09-05 23:02:47] [Rank 0] step:3061/10000 train_time:146714ms step_avg:47.93ms +[2025-09-05 23:02:47] [Rank 0] step:3061/10000 train_time:146714ms step_avg:47.93ms +[2025-09-05 23:02:47] [Rank 0] step:3081/10000 train_time:147452ms step_avg:47.86ms +[2025-09-05 23:02:47] [Rank 0] step:3081/10000 train_time:147452ms step_avg:47.86ms +[2025-09-05 23:02:48] [Rank 0] step:3101/10000 train_time:148189ms step_avg:47.79ms +[2025-09-05 23:02:48] [Rank 0] 
step:3101/10000 train_time:148189ms step_avg:47.79ms +[2025-09-05 23:02:49] [Rank 0] step:3121/10000 train_time:148927ms step_avg:47.72ms +[2025-09-05 23:02:49] [Rank 0] step:3121/10000 train_time:148927ms step_avg:47.72ms +[2025-09-05 23:02:50] [Rank 0] step:3141/10000 train_time:149664ms step_avg:47.65ms +[2025-09-05 23:02:50] [Rank 0] step:3141/10000 train_time:149664ms step_avg:47.65ms +[2025-09-05 23:02:50] [Rank 0] step:3161/10000 train_time:150403ms step_avg:47.58ms +[2025-09-05 23:02:50] [Rank 0] step:3161/10000 train_time:150403ms step_avg:47.58ms +[2025-09-05 23:02:51] [Rank 0] step:3181/10000 train_time:151141ms step_avg:47.51ms +[2025-09-05 23:02:51] [Rank 0] step:3181/10000 train_time:151141ms step_avg:47.51ms +[2025-09-05 23:02:52] [Rank 0] step:3201/10000 train_time:151879ms step_avg:47.45ms +[2025-09-05 23:02:52] [Rank 0] step:3201/10000 train_time:151879ms step_avg:47.45ms +[2025-09-05 23:02:53] [Rank 0] step:3221/10000 train_time:152617ms step_avg:47.38ms +[2025-09-05 23:02:53] [Rank 0] step:3221/10000 train_time:152617ms step_avg:47.38ms +[2025-09-05 23:02:53] [Rank 0] step:3241/10000 train_time:153355ms step_avg:47.32ms +[2025-09-05 23:02:53] [Rank 0] step:3241/10000 train_time:153355ms step_avg:47.32ms +[2025-09-05 23:02:54] [Rank 0] step:3261/10000 train_time:154093ms step_avg:47.25ms +[2025-09-05 23:02:54] [Rank 0] step:3261/10000 train_time:154093ms step_avg:47.25ms +[2025-09-05 23:02:55] [Rank 0] step:3281/10000 train_time:154831ms step_avg:47.19ms +[2025-09-05 23:02:55] [Rank 0] step:3281/10000 train_time:154831ms step_avg:47.19ms +[2025-09-05 23:02:55] [Rank 0] step:3301/10000 train_time:155568ms step_avg:47.13ms +[2025-09-05 23:02:55] [Rank 0] step:3301/10000 train_time:155568ms step_avg:47.13ms +[2025-09-05 23:02:56] [Rank 0] step:3321/10000 train_time:156306ms step_avg:47.07ms +[2025-09-05 23:02:56] [Rank 0] step:3321/10000 train_time:156306ms step_avg:47.07ms +[2025-09-05 23:02:57] [Rank 0] step:3341/10000 train_time:157043ms step_avg:47.00ms +[2025-09-05 23:02:57] [Rank 0] step:3341/10000 train_time:157043ms step_avg:47.00ms +[2025-09-05 23:02:58] [Rank 0] step:3361/10000 train_time:157780ms step_avg:46.94ms +[2025-09-05 23:02:58] [Rank 0] step:3361/10000 train_time:157780ms step_avg:46.94ms +[2025-09-05 23:02:58] [Rank 0] step:3381/10000 train_time:158516ms step_avg:46.88ms +[2025-09-05 23:02:58] [Rank 0] step:3381/10000 train_time:158516ms step_avg:46.88ms +[2025-09-05 23:02:59] [Rank 0] step:3401/10000 train_time:159254ms step_avg:46.83ms +[2025-09-05 23:02:59] [Rank 0] step:3401/10000 train_time:159254ms step_avg:46.83ms +[2025-09-05 23:03:00] [Rank 0] step:3421/10000 train_time:160101ms step_avg:46.80ms +[2025-09-05 23:03:00] [Rank 0] step:3421/10000 train_time:160101ms step_avg:46.80ms +[2025-09-05 23:03:01] [Rank 0] step:3441/10000 train_time:160839ms step_avg:46.74ms +[2025-09-05 23:03:01] [Rank 0] step:3441/10000 train_time:160839ms step_avg:46.74ms +[2025-09-05 23:03:02] [Rank 0] step:3461/10000 train_time:161577ms step_avg:46.69ms +[2025-09-05 23:03:02] [Rank 0] step:3461/10000 train_time:161577ms step_avg:46.69ms +[2025-09-05 23:03:02] [Rank 0] step:3481/10000 train_time:162314ms step_avg:46.63ms +[2025-09-05 23:03:02] [Rank 0] step:3481/10000 train_time:162314ms step_avg:46.63ms +[2025-09-05 23:03:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:03:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). 
Some tokens might be missed. +[2025-09-05 23:03:04] [Rank 0] PRINT: step:3500/10000 train_loss:2.4024 val_loss:2.3518 train_time:163310ms step_avg:46.66ms +[2025-09-05 23:03:04] [Rank 0] PRINT: step:3500/10000 train_loss:2.4024 val_loss:2.3518 train_time:163310ms step_avg:46.66ms +[2025-09-05 23:03:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:03:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:03:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:03:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:04:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:04:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:04:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:04:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:04:25] [Rank 0] Total Loss: 4.8067 +[2025-09-05 23:04:25] [Rank 0] Total Loss: 4.8067 +[2025-09-05 23:04:25] [Rank 0] Total FTA (Unweighted): 0.2556 +[2025-09-05 23:04:25] [Rank 0] Total FTA (Unweighted): 0.2556 +[2025-09-05 23:04:25] [Rank 0] Total FTA (Weighted): 0.2556 +[2025-09-05 23:04:25] [Rank 0] Total FTA (Weighted): 0.2556 +[2025-09-05 23:04:25] [Rank 0] Group 0 Loss: 3.3470 +[2025-09-05 23:04:25] [Rank 0] Group 0 Loss: 3.3470 +[2025-09-05 23:04:25] [Rank 0] Group 1 Loss: 3.2074 +[2025-09-05 23:04:25] [Rank 0] Group 1 Loss: 3.2074 +[2025-09-05 23:04:25] [Rank 0] Group 2 Loss: 3.2605 +[2025-09-05 23:04:25] [Rank 0] Group 2 Loss: 3.2605 +[2025-09-05 23:04:25] [Rank 0] Group 3 Loss: 3.7131 +[2025-09-05 23:04:25] [Rank 0] Group 3 Loss: 3.7131 +[2025-09-05 23:04:25] [Rank 0] Group 4 Loss: 4.1607 +[2025-09-05 23:04:25] [Rank 0] Group 4 Loss: 4.1607 +[2025-09-05 23:04:25] [Rank 0] Group 5 Loss: 4.6870 +[2025-09-05 23:04:25] [Rank 0] Group 5 Loss: 4.6870 +[2025-09-05 23:04:25] [Rank 0] Group 6 Loss: 4.9548 +[2025-09-05 23:04:25] [Rank 0] Group 6 Loss: 4.9548 +[2025-09-05 23:04:25] [Rank 0] Group 7 Loss: 5.1247 +[2025-09-05 23:04:25] [Rank 0] Group 7 Loss: 5.1247 +[2025-09-05 23:04:25] [Rank 0] Group 8 Loss: 5.3997 +[2025-09-05 23:04:25] [Rank 0] Group 8 Loss: 5.3997 +[2025-09-05 23:04:25] [Rank 0] Group 9 Loss: 5.5341 +[2025-09-05 23:04:25] [Rank 0] Group 9 Loss: 5.5341 +[2025-09-05 23:04:25] [Rank 0] Group 10 Loss: 5.5902 +[2025-09-05 23:04:25] [Rank 0] Group 10 Loss: 5.5902 +[2025-09-05 23:04:25] [Rank 0] Group 11 Loss: 5.6290 +[2025-09-05 23:04:25] [Rank 0] Group 11 Loss: 5.6290 +[2025-09-05 23:04:25] [Rank 0] Group 12 Loss: 5.5518 +[2025-09-05 23:04:25] [Rank 0] Group 12 Loss: 5.5518 +[2025-09-05 23:04:25] [Rank 0] Group 13 Loss: 5.5642 +[2025-09-05 23:04:25] [Rank 0] Group 13 Loss: 5.5642 +[2025-09-05 23:04:25] [Rank 0] Group 14 Loss: 5.6446 +[2025-09-05 23:04:25] [Rank 0] Group 14 Loss: 5.6446 +[2025-09-05 23:04:25] [Rank 0] Group 15 Loss: 5.5391 +[2025-09-05 23:04:25] [Rank 0] Group 15 Loss: 5.5391 +[2025-09-05 23:04:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:04:25] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:04:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:04:25] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:04:25] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 23:04:25] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 23:04:25] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:04:25] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:04:25] [Rank 0] Group 4 FTA: 0.2200 +[2025-09-05 23:04:25] [Rank 0] Group 4 FTA: 0.2200 +[2025-09-05 23:04:25] [Rank 0] Group 5 FTA: 0.2100 +[2025-09-05 
23:04:25] [Rank 0] Group 5 FTA: 0.2100 +[2025-09-05 23:04:25] [Rank 0] Group 6 FTA: 0.2000 +[2025-09-05 23:04:25] [Rank 0] Group 6 FTA: 0.2000 +[2025-09-05 23:04:25] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 23:04:25] [Rank 0] Group 7 FTA: 0.1000 +[2025-09-05 23:04:25] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:04:25] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 23:04:25] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 23:04:25] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 23:04:25] [Rank 0] Group 10 FTA: 0.1500 +[2025-09-05 23:04:25] [Rank 0] Group 10 FTA: 0.1500 +[2025-09-05 23:04:25] [Rank 0] Group 11 FTA: 0.1300 +[2025-09-05 23:04:25] [Rank 0] Group 11 FTA: 0.1300 +[2025-09-05 23:04:25] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 23:04:25] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 23:04:25] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 23:04:25] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 23:04:25] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 23:04:25] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 23:04:25] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 23:04:25] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 23:04:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:04:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:04:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:04:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:04:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:04:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:04:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:04:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:04:27] [Rank 0] step:3501/10000 train_time:163319ms step_avg:46.65ms +[2025-09-05 23:04:27] [Rank 0] step:3501/10000 train_time:163319ms step_avg:46.65ms +[2025-09-05 23:04:27] [Rank 0] step:3521/10000 train_time:163999ms step_avg:46.58ms +[2025-09-05 23:04:27] [Rank 0] step:3521/10000 train_time:163999ms step_avg:46.58ms +[2025-09-05 23:04:28] [Rank 0] step:3541/10000 train_time:164736ms step_avg:46.52ms +[2025-09-05 23:04:28] [Rank 0] step:3541/10000 train_time:164736ms step_avg:46.52ms +[2025-09-05 23:04:29] [Rank 0] step:3561/10000 train_time:165475ms step_avg:46.47ms +[2025-09-05 23:04:29] [Rank 0] step:3561/10000 train_time:165475ms step_avg:46.47ms +[2025-09-05 23:04:30] [Rank 0] step:3581/10000 train_time:166212ms step_avg:46.42ms +[2025-09-05 23:04:30] [Rank 0] step:3581/10000 train_time:166212ms step_avg:46.42ms +[2025-09-05 23:04:30] [Rank 0] step:3601/10000 train_time:166951ms step_avg:46.36ms +[2025-09-05 23:04:30] [Rank 0] step:3601/10000 train_time:166951ms step_avg:46.36ms +[2025-09-05 23:04:31] [Rank 0] step:3621/10000 train_time:167688ms 
step_avg:46.31ms +[2025-09-05 23:04:31] [Rank 0] step:3621/10000 train_time:167688ms step_avg:46.31ms +[2025-09-05 23:04:32] [Rank 0] step:3641/10000 train_time:169050ms step_avg:46.43ms +[2025-09-05 23:04:32] [Rank 0] step:3641/10000 train_time:169050ms step_avg:46.43ms +[2025-09-05 23:04:33] [Rank 0] step:3661/10000 train_time:169787ms step_avg:46.38ms +[2025-09-05 23:04:33] [Rank 0] step:3661/10000 train_time:169787ms step_avg:46.38ms +[2025-09-05 23:04:34] [Rank 0] step:3681/10000 train_time:170525ms step_avg:46.33ms +[2025-09-05 23:04:34] [Rank 0] step:3681/10000 train_time:170525ms step_avg:46.33ms +[2025-09-05 23:04:35] [Rank 0] step:3701/10000 train_time:171261ms step_avg:46.27ms +[2025-09-05 23:04:35] [Rank 0] step:3701/10000 train_time:171261ms step_avg:46.27ms +[2025-09-05 23:04:35] [Rank 0] step:3721/10000 train_time:171999ms step_avg:46.22ms +[2025-09-05 23:04:35] [Rank 0] step:3721/10000 train_time:171999ms step_avg:46.22ms +[2025-09-05 23:04:36] [Rank 0] step:3741/10000 train_time:172737ms step_avg:46.17ms +[2025-09-05 23:04:36] [Rank 0] step:3741/10000 train_time:172737ms step_avg:46.17ms +[2025-09-05 23:04:37] [Rank 0] step:3761/10000 train_time:173476ms step_avg:46.12ms +[2025-09-05 23:04:37] [Rank 0] step:3761/10000 train_time:173476ms step_avg:46.12ms +[2025-09-05 23:04:38] [Rank 0] step:3781/10000 train_time:174215ms step_avg:46.08ms +[2025-09-05 23:04:38] [Rank 0] step:3781/10000 train_time:174215ms step_avg:46.08ms +[2025-09-05 23:04:38] [Rank 0] step:3801/10000 train_time:174953ms step_avg:46.03ms +[2025-09-05 23:04:38] [Rank 0] step:3801/10000 train_time:174953ms step_avg:46.03ms +[2025-09-05 23:04:39] [Rank 0] step:3821/10000 train_time:175691ms step_avg:45.98ms +[2025-09-05 23:04:39] [Rank 0] step:3821/10000 train_time:175691ms step_avg:45.98ms +[2025-09-05 23:04:40] [Rank 0] step:3841/10000 train_time:176430ms step_avg:45.93ms +[2025-09-05 23:04:40] [Rank 0] step:3841/10000 train_time:176430ms step_avg:45.93ms +[2025-09-05 23:04:40] [Rank 0] step:3861/10000 train_time:177169ms step_avg:45.89ms +[2025-09-05 23:04:40] [Rank 0] step:3861/10000 train_time:177169ms step_avg:45.89ms +[2025-09-05 23:04:41] [Rank 0] step:3881/10000 train_time:177907ms step_avg:45.84ms +[2025-09-05 23:04:41] [Rank 0] step:3881/10000 train_time:177907ms step_avg:45.84ms +[2025-09-05 23:04:42] [Rank 0] step:3901/10000 train_time:178646ms step_avg:45.79ms +[2025-09-05 23:04:42] [Rank 0] step:3901/10000 train_time:178646ms step_avg:45.79ms +[2025-09-05 23:04:43] [Rank 0] step:3921/10000 train_time:179383ms step_avg:45.75ms +[2025-09-05 23:04:43] [Rank 0] step:3921/10000 train_time:179383ms step_avg:45.75ms +[2025-09-05 23:04:43] [Rank 0] step:3941/10000 train_time:180120ms step_avg:45.70ms +[2025-09-05 23:04:43] [Rank 0] step:3941/10000 train_time:180120ms step_avg:45.70ms +[2025-09-05 23:04:44] [Rank 0] step:3961/10000 train_time:180859ms step_avg:45.66ms +[2025-09-05 23:04:44] [Rank 0] step:3961/10000 train_time:180859ms step_avg:45.66ms +[2025-09-05 23:04:45] [Rank 0] step:3981/10000 train_time:181596ms step_avg:45.62ms +[2025-09-05 23:04:45] [Rank 0] step:3981/10000 train_time:181596ms step_avg:45.62ms +[2025-09-05 23:04:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:04:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:04:46] [Rank 0] PRINT: step:4000/10000 train_loss:2.3289 val_loss:2.2875 train_time:182416ms step_avg:45.60ms +[2025-09-05 23:04:46] [Rank 0] PRINT: step:4000/10000 train_loss:2.3289 val_loss:2.2875 train_time:182416ms step_avg:45.60ms +[2025-09-05 23:04:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:04:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:04:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:04:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:06:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:06:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:06:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:06:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:06:08] [Rank 0] Total Loss: 4.7045 +[2025-09-05 23:06:08] [Rank 0] Total Loss: 4.7045 +[2025-09-05 23:06:08] [Rank 0] Total FTA (Unweighted): 0.2781 +[2025-09-05 23:06:08] [Rank 0] Total FTA (Unweighted): 0.2781 +[2025-09-05 23:06:08] [Rank 0] Total FTA (Weighted): 0.2781 +[2025-09-05 23:06:08] [Rank 0] Total FTA (Weighted): 0.2781 +[2025-09-05 23:06:08] [Rank 0] Group 0 Loss: 3.4089 +[2025-09-05 23:06:08] [Rank 0] Group 0 Loss: 3.4089 +[2025-09-05 23:06:08] [Rank 0] Group 1 Loss: 3.1366 +[2025-09-05 23:06:08] [Rank 0] Group 1 Loss: 3.1366 +[2025-09-05 23:06:08] [Rank 0] Group 2 Loss: 3.1567 +[2025-09-05 23:06:08] [Rank 0] Group 2 Loss: 3.1567 +[2025-09-05 23:06:08] [Rank 0] Group 3 Loss: 3.6437 +[2025-09-05 23:06:08] [Rank 0] Group 3 Loss: 3.6437 +[2025-09-05 23:06:08] [Rank 0] Group 4 Loss: 4.0433 +[2025-09-05 23:06:08] [Rank 0] Group 4 Loss: 4.0433 +[2025-09-05 23:06:08] [Rank 0] Group 5 Loss: 4.5579 +[2025-09-05 23:06:08] [Rank 0] Group 5 Loss: 4.5579 +[2025-09-05 23:06:08] [Rank 0] Group 6 Loss: 4.8475 +[2025-09-05 23:06:08] [Rank 0] Group 6 Loss: 4.8475 +[2025-09-05 23:06:08] [Rank 0] Group 7 Loss: 4.9807 +[2025-09-05 23:06:08] [Rank 0] Group 7 Loss: 4.9807 +[2025-09-05 23:06:08] [Rank 0] Group 8 Loss: 5.2849 +[2025-09-05 23:06:08] [Rank 0] Group 8 Loss: 5.2849 +[2025-09-05 23:06:08] [Rank 0] Group 9 Loss: 5.4132 +[2025-09-05 23:06:08] [Rank 0] Group 9 Loss: 5.4132 +[2025-09-05 23:06:08] [Rank 0] Group 10 Loss: 5.4552 +[2025-09-05 23:06:08] [Rank 0] Group 10 Loss: 5.4552 +[2025-09-05 23:06:08] [Rank 0] Group 11 Loss: 5.5079 +[2025-09-05 23:06:08] [Rank 0] Group 11 Loss: 5.5079 +[2025-09-05 23:06:08] [Rank 0] Group 12 Loss: 5.4154 +[2025-09-05 23:06:08] [Rank 0] Group 12 Loss: 5.4154 +[2025-09-05 23:06:08] [Rank 0] Group 13 Loss: 5.4643 +[2025-09-05 23:06:08] [Rank 0] Group 13 Loss: 5.4643 +[2025-09-05 23:06:08] [Rank 0] Group 14 Loss: 5.5053 +[2025-09-05 23:06:08] [Rank 0] Group 14 Loss: 5.5053 +[2025-09-05 23:06:08] [Rank 0] Group 15 Loss: 5.4500 +[2025-09-05 23:06:08] [Rank 0] Group 15 Loss: 5.4500 +[2025-09-05 23:06:08] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:06:08] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:06:08] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:06:08] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:06:08] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:06:08] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:06:08] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:06:08] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:06:08] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:06:08] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:06:08] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:06:08] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:06:08] [Rank 0] Group 6 FTA: 0.2700 +[2025-09-05 23:06:08] [Rank 0] Group 6 FTA: 0.2700 +[2025-09-05 23:06:08] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:06:08] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:06:08] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:06:08] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:06:08] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:06:08] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:06:08] [Rank 0] Group 10 FTA: 0.1600 +[2025-09-05 23:06:08] [Rank 0] Group 10 FTA: 0.1600 +[2025-09-05 23:06:08] [Rank 0] Group 11 FTA: 0.1300 +[2025-09-05 23:06:08] [Rank 0] Group 11 FTA: 0.1300 +[2025-09-05 23:06:08] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 23:06:08] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 23:06:08] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 23:06:08] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 23:06:08] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:06:08] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 23:06:08] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 23:06:08] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 23:06:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:06:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:06:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:06:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:06:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:06:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:06:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:06:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:06:09] [Rank 0] step:4001/10000 train_time:182425ms step_avg:45.59ms +[2025-09-05 23:06:09] [Rank 0] step:4001/10000 train_time:182425ms step_avg:45.59ms +[2025-09-05 23:06:11] [Rank 0] step:4021/10000 train_time:183756ms step_avg:45.70ms +[2025-09-05 23:06:11] [Rank 0] step:4021/10000 train_time:183756ms step_avg:45.70ms +[2025-09-05 23:06:11] [Rank 0] step:4041/10000 train_time:184495ms step_avg:45.66ms +[2025-09-05 23:06:11] [Rank 0] step:4041/10000 train_time:184495ms step_avg:45.66ms +[2025-09-05 23:06:12] [Rank 0] step:4061/10000 train_time:185368ms step_avg:45.65ms +[2025-09-05 23:06:12] [Rank 0] step:4061/10000 train_time:185368ms step_avg:45.65ms +[2025-09-05 23:06:13] [Rank 0] step:4081/10000 train_time:186104ms step_avg:45.60ms +[2025-09-05 23:06:13] [Rank 0] step:4081/10000 train_time:186104ms step_avg:45.60ms +[2025-09-05 23:06:14] [Rank 0] step:4101/10000 train_time:186841ms step_avg:45.56ms +[2025-09-05 23:06:14] [Rank 0] step:4101/10000 train_time:186841ms step_avg:45.56ms +[2025-09-05 23:06:15] [Rank 0] step:4121/10000 train_time:187579ms step_avg:45.52ms +[2025-09-05 23:06:15] 
[Rank 0] step:4121/10000 train_time:187579ms step_avg:45.52ms +[2025-09-05 23:06:15] [Rank 0] step:4141/10000 train_time:188317ms step_avg:45.48ms +[2025-09-05 23:06:15] [Rank 0] step:4141/10000 train_time:188317ms step_avg:45.48ms +[2025-09-05 23:06:16] [Rank 0] step:4161/10000 train_time:189056ms step_avg:45.44ms +[2025-09-05 23:06:16] [Rank 0] step:4161/10000 train_time:189056ms step_avg:45.44ms +[2025-09-05 23:06:17] [Rank 0] step:4181/10000 train_time:189794ms step_avg:45.39ms +[2025-09-05 23:06:17] [Rank 0] step:4181/10000 train_time:189794ms step_avg:45.39ms +[2025-09-05 23:06:18] [Rank 0] step:4201/10000 train_time:190532ms step_avg:45.35ms +[2025-09-05 23:06:18] [Rank 0] step:4201/10000 train_time:190532ms step_avg:45.35ms +[2025-09-05 23:06:18] [Rank 0] step:4221/10000 train_time:191271ms step_avg:45.31ms +[2025-09-05 23:06:18] [Rank 0] step:4221/10000 train_time:191271ms step_avg:45.31ms +[2025-09-05 23:06:19] [Rank 0] step:4241/10000 train_time:192010ms step_avg:45.27ms +[2025-09-05 23:06:19] [Rank 0] step:4241/10000 train_time:192010ms step_avg:45.27ms +[2025-09-05 23:06:20] [Rank 0] step:4261/10000 train_time:192748ms step_avg:45.24ms +[2025-09-05 23:06:20] [Rank 0] step:4261/10000 train_time:192748ms step_avg:45.24ms +[2025-09-05 23:06:20] [Rank 0] step:4281/10000 train_time:193486ms step_avg:45.20ms +[2025-09-05 23:06:20] [Rank 0] step:4281/10000 train_time:193486ms step_avg:45.20ms +[2025-09-05 23:06:21] [Rank 0] step:4301/10000 train_time:194225ms step_avg:45.16ms +[2025-09-05 23:06:21] [Rank 0] step:4301/10000 train_time:194225ms step_avg:45.16ms +[2025-09-05 23:06:22] [Rank 0] step:4321/10000 train_time:194965ms step_avg:45.12ms +[2025-09-05 23:06:22] [Rank 0] step:4321/10000 train_time:194965ms step_avg:45.12ms +[2025-09-05 23:06:23] [Rank 0] step:4341/10000 train_time:195704ms step_avg:45.08ms +[2025-09-05 23:06:23] [Rank 0] step:4341/10000 train_time:195704ms step_avg:45.08ms +[2025-09-05 23:06:23] [Rank 0] step:4361/10000 train_time:196442ms step_avg:45.05ms +[2025-09-05 23:06:23] [Rank 0] step:4361/10000 train_time:196442ms step_avg:45.05ms +[2025-09-05 23:06:24] [Rank 0] step:4381/10000 train_time:197181ms step_avg:45.01ms +[2025-09-05 23:06:24] [Rank 0] step:4381/10000 train_time:197181ms step_avg:45.01ms +[2025-09-05 23:06:25] [Rank 0] step:4401/10000 train_time:197920ms step_avg:44.97ms +[2025-09-05 23:06:25] [Rank 0] step:4401/10000 train_time:197920ms step_avg:44.97ms +[2025-09-05 23:06:26] [Rank 0] step:4421/10000 train_time:198659ms step_avg:44.94ms +[2025-09-05 23:06:26] [Rank 0] step:4421/10000 train_time:198659ms step_avg:44.94ms +[2025-09-05 23:06:26] [Rank 0] step:4441/10000 train_time:199398ms step_avg:44.90ms +[2025-09-05 23:06:26] [Rank 0] step:4441/10000 train_time:199398ms step_avg:44.90ms +[2025-09-05 23:06:27] [Rank 0] step:4461/10000 train_time:200137ms step_avg:44.86ms +[2025-09-05 23:06:27] [Rank 0] step:4461/10000 train_time:200137ms step_avg:44.86ms +[2025-09-05 23:06:28] [Rank 0] step:4481/10000 train_time:200876ms step_avg:44.83ms +[2025-09-05 23:06:28] [Rank 0] step:4481/10000 train_time:200876ms step_avg:44.83ms +[2025-09-05 23:06:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:06:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:06:29] [Rank 0] PRINT: step:4500/10000 train_loss:2.2670 val_loss:2.2292 train_time:201699ms step_avg:44.82ms +[2025-09-05 23:06:29] [Rank 0] PRINT: step:4500/10000 train_loss:2.2670 val_loss:2.2292 train_time:201699ms step_avg:44.82ms +[2025-09-05 23:06:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:06:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:06:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:06:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:07:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:07:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:07:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:07:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:07:50] [Rank 0] Total Loss: 4.7056 +[2025-09-05 23:07:50] [Rank 0] Total Loss: 4.7056 +[2025-09-05 23:07:50] [Rank 0] Total FTA (Unweighted): 0.2838 +[2025-09-05 23:07:50] [Rank 0] Total FTA (Unweighted): 0.2838 +[2025-09-05 23:07:50] [Rank 0] Total FTA (Weighted): 0.2838 +[2025-09-05 23:07:50] [Rank 0] Total FTA (Weighted): 0.2838 +[2025-09-05 23:07:50] [Rank 0] Group 0 Loss: 3.4501 +[2025-09-05 23:07:50] [Rank 0] Group 0 Loss: 3.4501 +[2025-09-05 23:07:50] [Rank 0] Group 1 Loss: 3.1375 +[2025-09-05 23:07:50] [Rank 0] Group 1 Loss: 3.1375 +[2025-09-05 23:07:50] [Rank 0] Group 2 Loss: 3.2754 +[2025-09-05 23:07:50] [Rank 0] Group 2 Loss: 3.2754 +[2025-09-05 23:07:50] [Rank 0] Group 3 Loss: 3.6650 +[2025-09-05 23:07:50] [Rank 0] Group 3 Loss: 3.6650 +[2025-09-05 23:07:50] [Rank 0] Group 4 Loss: 4.0139 +[2025-09-05 23:07:50] [Rank 0] Group 4 Loss: 4.0139 +[2025-09-05 23:07:50] [Rank 0] Group 5 Loss: 4.5085 +[2025-09-05 23:07:50] [Rank 0] Group 5 Loss: 4.5085 +[2025-09-05 23:07:50] [Rank 0] Group 6 Loss: 4.8173 +[2025-09-05 23:07:50] [Rank 0] Group 6 Loss: 4.8173 +[2025-09-05 23:07:50] [Rank 0] Group 7 Loss: 4.9688 +[2025-09-05 23:07:50] [Rank 0] Group 7 Loss: 4.9688 +[2025-09-05 23:07:50] [Rank 0] Group 8 Loss: 5.2734 +[2025-09-05 23:07:50] [Rank 0] Group 8 Loss: 5.2734 +[2025-09-05 23:07:50] [Rank 0] Group 9 Loss: 5.4120 +[2025-09-05 23:07:50] [Rank 0] Group 9 Loss: 5.4120 +[2025-09-05 23:07:50] [Rank 0] Group 10 Loss: 5.4678 +[2025-09-05 23:07:50] [Rank 0] Group 10 Loss: 5.4678 +[2025-09-05 23:07:50] [Rank 0] Group 11 Loss: 5.4766 +[2025-09-05 23:07:50] [Rank 0] Group 11 Loss: 5.4766 +[2025-09-05 23:07:50] [Rank 0] Group 12 Loss: 5.4144 +[2025-09-05 23:07:50] [Rank 0] Group 12 Loss: 5.4144 +[2025-09-05 23:07:50] [Rank 0] Group 13 Loss: 5.4449 +[2025-09-05 23:07:50] [Rank 0] Group 13 Loss: 5.4449 +[2025-09-05 23:07:50] [Rank 0] Group 14 Loss: 5.5153 +[2025-09-05 23:07:50] [Rank 0] Group 14 Loss: 5.5153 +[2025-09-05 23:07:50] [Rank 0] Group 15 Loss: 5.4484 +[2025-09-05 23:07:50] [Rank 0] Group 15 Loss: 5.4484 +[2025-09-05 23:07:50] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:07:50] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:07:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:07:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:07:50] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:07:50] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:07:50] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:07:50] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:07:50] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:07:50] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:07:50] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:07:50] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:07:50] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:07:50] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:07:50] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:07:50] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:07:50] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:07:50] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:07:50] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:07:50] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:07:50] [Rank 0] Group 10 FTA: 0.1800 +[2025-09-05 23:07:50] [Rank 0] Group 10 FTA: 0.1800 +[2025-09-05 23:07:50] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:07:50] [Rank 0] Group 11 FTA: 0.1800 +[2025-09-05 23:07:50] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 23:07:50] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 23:07:50] [Rank 0] Group 13 FTA: 0.1600 +[2025-09-05 23:07:50] [Rank 0] Group 13 FTA: 0.1600 +[2025-09-05 23:07:50] [Rank 0] Group 14 FTA: 0.1000 +[2025-09-05 23:07:50] [Rank 0] Group 14 FTA: 0.1000 +[2025-09-05 23:07:50] [Rank 0] Group 15 FTA: 0.0400 +[2025-09-05 23:07:50] [Rank 0] Group 15 FTA: 0.0400 +[2025-09-05 23:07:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:07:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:07:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:07:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:07:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:07:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:07:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:07:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:07:51] [Rank 0] step:4501/10000 train_time:201708ms step_avg:44.81ms +[2025-09-05 23:07:51] [Rank 0] step:4501/10000 train_time:201708ms step_avg:44.81ms +[2025-09-05 23:07:52] [Rank 0] step:4521/10000 train_time:202389ms step_avg:44.77ms +[2025-09-05 23:07:52] [Rank 0] step:4521/10000 train_time:202389ms step_avg:44.77ms +[2025-09-05 23:07:53] [Rank 0] step:4541/10000 train_time:203126ms step_avg:44.73ms +[2025-09-05 23:07:53] [Rank 0] step:4541/10000 train_time:203126ms step_avg:44.73ms +[2025-09-05 23:07:53] [Rank 0] step:4561/10000 train_time:203864ms step_avg:44.70ms +[2025-09-05 23:07:53] [Rank 0] step:4561/10000 train_time:203864ms step_avg:44.70ms +[2025-09-05 23:07:54] [Rank 0] step:4581/10000 train_time:204601ms step_avg:44.66ms +[2025-09-05 23:07:54] [Rank 0] step:4581/10000 train_time:204601ms step_avg:44.66ms +[2025-09-05 23:07:55] [Rank 0] step:4601/10000 train_time:205339ms step_avg:44.63ms +[2025-09-05 23:07:55] [Rank 0] step:4601/10000 train_time:205339ms step_avg:44.63ms +[2025-09-05 23:07:56] [Rank 0] step:4621/10000 train_time:206076ms step_avg:44.60ms +[2025-09-05 23:07:56] 
[Rank 0] step:4621/10000 train_time:206076ms step_avg:44.60ms +[2025-09-05 23:07:56] [Rank 0] step:4641/10000 train_time:206813ms step_avg:44.56ms +[2025-09-05 23:07:56] [Rank 0] step:4641/10000 train_time:206813ms step_avg:44.56ms +[2025-09-05 23:07:57] [Rank 0] step:4661/10000 train_time:207551ms step_avg:44.53ms +[2025-09-05 23:07:57] [Rank 0] step:4661/10000 train_time:207551ms step_avg:44.53ms +[2025-09-05 23:07:58] [Rank 0] step:4681/10000 train_time:208293ms step_avg:44.50ms +[2025-09-05 23:07:58] [Rank 0] step:4681/10000 train_time:208293ms step_avg:44.50ms +[2025-09-05 23:07:59] [Rank 0] step:4701/10000 train_time:209030ms step_avg:44.47ms +[2025-09-05 23:07:59] [Rank 0] step:4701/10000 train_time:209030ms step_avg:44.47ms +[2025-09-05 23:07:59] [Rank 0] step:4721/10000 train_time:209768ms step_avg:44.43ms +[2025-09-05 23:07:59] [Rank 0] step:4721/10000 train_time:209768ms step_avg:44.43ms +[2025-09-05 23:08:00] [Rank 0] step:4741/10000 train_time:210506ms step_avg:44.40ms +[2025-09-05 23:08:00] [Rank 0] step:4741/10000 train_time:210506ms step_avg:44.40ms +[2025-09-05 23:08:01] [Rank 0] step:4761/10000 train_time:211243ms step_avg:44.37ms +[2025-09-05 23:08:01] [Rank 0] step:4761/10000 train_time:211243ms step_avg:44.37ms +[2025-09-05 23:08:02] [Rank 0] step:4781/10000 train_time:211980ms step_avg:44.34ms +[2025-09-05 23:08:02] [Rank 0] step:4781/10000 train_time:211980ms step_avg:44.34ms +[2025-09-05 23:08:02] [Rank 0] step:4801/10000 train_time:212722ms step_avg:44.31ms +[2025-09-05 23:08:02] [Rank 0] step:4801/10000 train_time:212722ms step_avg:44.31ms +[2025-09-05 23:08:03] [Rank 0] step:4821/10000 train_time:213460ms step_avg:44.28ms +[2025-09-05 23:08:03] [Rank 0] step:4821/10000 train_time:213460ms step_avg:44.28ms +[2025-09-05 23:08:04] [Rank 0] step:4841/10000 train_time:214503ms step_avg:44.31ms +[2025-09-05 23:08:04] [Rank 0] step:4841/10000 train_time:214503ms step_avg:44.31ms +[2025-09-05 23:08:05] [Rank 0] step:4861/10000 train_time:215241ms step_avg:44.28ms +[2025-09-05 23:08:05] [Rank 0] step:4861/10000 train_time:215241ms step_avg:44.28ms +[2025-09-05 23:08:06] [Rank 0] step:4881/10000 train_time:215979ms step_avg:44.25ms +[2025-09-05 23:08:06] [Rank 0] step:4881/10000 train_time:215979ms step_avg:44.25ms +[2025-09-05 23:08:06] [Rank 0] step:4901/10000 train_time:216717ms step_avg:44.22ms +[2025-09-05 23:08:06] [Rank 0] step:4901/10000 train_time:216717ms step_avg:44.22ms +[2025-09-05 23:08:07] [Rank 0] step:4921/10000 train_time:217454ms step_avg:44.19ms +[2025-09-05 23:08:07] [Rank 0] step:4921/10000 train_time:217454ms step_avg:44.19ms +[2025-09-05 23:08:08] [Rank 0] step:4941/10000 train_time:218191ms step_avg:44.16ms +[2025-09-05 23:08:08] [Rank 0] step:4941/10000 train_time:218191ms step_avg:44.16ms +[2025-09-05 23:08:08] [Rank 0] step:4961/10000 train_time:218929ms step_avg:44.13ms +[2025-09-05 23:08:08] [Rank 0] step:4961/10000 train_time:218929ms step_avg:44.13ms +[2025-09-05 23:08:09] [Rank 0] step:4981/10000 train_time:219666ms step_avg:44.10ms +[2025-09-05 23:08:09] [Rank 0] step:4981/10000 train_time:219666ms step_avg:44.10ms +[2025-09-05 23:08:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:08:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:08:10] [Rank 0] PRINT: step:5000/10000 train_loss:2.2148 val_loss:2.1844 train_time:220503ms step_avg:44.10ms +[2025-09-05 23:08:10] [Rank 0] PRINT: step:5000/10000 train_loss:2.2148 val_loss:2.1844 train_time:220503ms step_avg:44.10ms +[2025-09-05 23:08:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:08:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:08:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:08:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:09:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:09:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:09:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:09:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:09:31] [Rank 0] Total Loss: 4.6579 +[2025-09-05 23:09:31] [Rank 0] Total Loss: 4.6579 +[2025-09-05 23:09:31] [Rank 0] Total FTA (Unweighted): 0.2906 +[2025-09-05 23:09:31] [Rank 0] Total FTA (Unweighted): 0.2906 +[2025-09-05 23:09:31] [Rank 0] Total FTA (Weighted): 0.2906 +[2025-09-05 23:09:31] [Rank 0] Total FTA (Weighted): 0.2906 +[2025-09-05 23:09:31] [Rank 0] Group 0 Loss: 3.4415 +[2025-09-05 23:09:31] [Rank 0] Group 0 Loss: 3.4415 +[2025-09-05 23:09:31] [Rank 0] Group 1 Loss: 3.1222 +[2025-09-05 23:09:31] [Rank 0] Group 1 Loss: 3.1222 +[2025-09-05 23:09:31] [Rank 0] Group 2 Loss: 3.2023 +[2025-09-05 23:09:31] [Rank 0] Group 2 Loss: 3.2023 +[2025-09-05 23:09:31] [Rank 0] Group 3 Loss: 3.6515 +[2025-09-05 23:09:31] [Rank 0] Group 3 Loss: 3.6515 +[2025-09-05 23:09:31] [Rank 0] Group 4 Loss: 3.9817 +[2025-09-05 23:09:31] [Rank 0] Group 4 Loss: 3.9817 +[2025-09-05 23:09:31] [Rank 0] Group 5 Loss: 4.4795 +[2025-09-05 23:09:31] [Rank 0] Group 5 Loss: 4.4795 +[2025-09-05 23:09:31] [Rank 0] Group 6 Loss: 4.7441 +[2025-09-05 23:09:31] [Rank 0] Group 6 Loss: 4.7441 +[2025-09-05 23:09:31] [Rank 0] Group 7 Loss: 4.9022 +[2025-09-05 23:09:31] [Rank 0] Group 7 Loss: 4.9022 +[2025-09-05 23:09:31] [Rank 0] Group 8 Loss: 5.2164 +[2025-09-05 23:09:31] [Rank 0] Group 8 Loss: 5.2164 +[2025-09-05 23:09:31] [Rank 0] Group 9 Loss: 5.3365 +[2025-09-05 23:09:31] [Rank 0] Group 9 Loss: 5.3365 +[2025-09-05 23:09:31] [Rank 0] Group 10 Loss: 5.4131 +[2025-09-05 23:09:31] [Rank 0] Group 10 Loss: 5.4131 +[2025-09-05 23:09:31] [Rank 0] Group 11 Loss: 5.4229 +[2025-09-05 23:09:31] [Rank 0] Group 11 Loss: 5.4229 +[2025-09-05 23:09:31] [Rank 0] Group 12 Loss: 5.3607 +[2025-09-05 23:09:31] [Rank 0] Group 12 Loss: 5.3607 +[2025-09-05 23:09:31] [Rank 0] Group 13 Loss: 5.3987 +[2025-09-05 23:09:31] [Rank 0] Group 13 Loss: 5.3987 +[2025-09-05 23:09:31] [Rank 0] Group 14 Loss: 5.4542 +[2025-09-05 23:09:31] [Rank 0] Group 14 Loss: 5.4542 +[2025-09-05 23:09:31] [Rank 0] Group 15 Loss: 5.3982 +[2025-09-05 23:09:31] [Rank 0] Group 15 Loss: 5.3982 +[2025-09-05 23:09:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:09:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:09:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:09:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:09:31] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:09:31] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 23:09:31] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:09:31] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:09:31] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:09:31] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:09:31] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 23:09:31] [Rank 0] Group 5 FTA: 
0.2400 +[2025-09-05 23:09:31] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:09:31] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:09:31] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:09:31] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 23:09:31] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:09:31] [Rank 0] Group 8 FTA: 0.2200 +[2025-09-05 23:09:31] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:09:31] [Rank 0] Group 9 FTA: 0.1400 +[2025-09-05 23:09:31] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-05 23:09:31] [Rank 0] Group 10 FTA: 0.2000 +[2025-09-05 23:09:31] [Rank 0] Group 11 FTA: 0.1700 +[2025-09-05 23:09:31] [Rank 0] Group 11 FTA: 0.1700 +[2025-09-05 23:09:31] [Rank 0] Group 12 FTA: 0.1400 +[2025-09-05 23:09:31] [Rank 0] Group 12 FTA: 0.1400 +[2025-09-05 23:09:31] [Rank 0] Group 13 FTA: 0.1700 +[2025-09-05 23:09:31] [Rank 0] Group 13 FTA: 0.1700 +[2025-09-05 23:09:31] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 23:09:31] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 23:09:31] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 23:09:31] [Rank 0] Group 15 FTA: 0.0700 +[2025-09-05 23:09:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:09:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:09:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:09:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:09:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:09:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:09:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:09:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:09:33] [Rank 0] step:5001/10000 train_time:220512ms step_avg:44.09ms +[2025-09-05 23:09:33] [Rank 0] step:5001/10000 train_time:220512ms step_avg:44.09ms +[2025-09-05 23:09:34] [Rank 0] step:5021/10000 train_time:221196ms step_avg:44.05ms +[2025-09-05 23:09:34] [Rank 0] step:5021/10000 train_time:221196ms step_avg:44.05ms +[2025-09-05 23:09:34] [Rank 0] step:5041/10000 train_time:221935ms step_avg:44.03ms +[2025-09-05 23:09:34] [Rank 0] step:5041/10000 train_time:221935ms step_avg:44.03ms +[2025-09-05 23:09:35] [Rank 0] step:5061/10000 train_time:222672ms step_avg:44.00ms +[2025-09-05 23:09:35] [Rank 0] step:5061/10000 train_time:222672ms step_avg:44.00ms +[2025-09-05 23:09:36] [Rank 0] step:5081/10000 train_time:223410ms step_avg:43.97ms +[2025-09-05 23:09:36] [Rank 0] step:5081/10000 train_time:223410ms step_avg:43.97ms +[2025-09-05 23:09:37] [Rank 0] step:5101/10000 train_time:224149ms step_avg:43.94ms +[2025-09-05 23:09:37] [Rank 0] step:5101/10000 train_time:224149ms step_avg:43.94ms +[2025-09-05 23:09:37] [Rank 0] step:5121/10000 train_time:224887ms step_avg:43.91ms +[2025-09-05 23:09:37] 
[Rank 0] step:5121/10000 train_time:224887ms step_avg:43.91ms +[2025-09-05 23:09:38] [Rank 0] step:5141/10000 train_time:225625ms step_avg:43.89ms +[2025-09-05 23:09:38] [Rank 0] step:5141/10000 train_time:225625ms step_avg:43.89ms +[2025-09-05 23:09:39] [Rank 0] step:5161/10000 train_time:226364ms step_avg:43.86ms +[2025-09-05 23:09:39] [Rank 0] step:5161/10000 train_time:226364ms step_avg:43.86ms +[2025-09-05 23:09:40] [Rank 0] step:5181/10000 train_time:227103ms step_avg:43.83ms +[2025-09-05 23:09:40] [Rank 0] step:5181/10000 train_time:227103ms step_avg:43.83ms +[2025-09-05 23:09:40] [Rank 0] step:5201/10000 train_time:227842ms step_avg:43.81ms +[2025-09-05 23:09:40] [Rank 0] step:5201/10000 train_time:227842ms step_avg:43.81ms +[2025-09-05 23:09:41] [Rank 0] step:5221/10000 train_time:228579ms step_avg:43.78ms +[2025-09-05 23:09:41] [Rank 0] step:5221/10000 train_time:228579ms step_avg:43.78ms +[2025-09-05 23:09:42] [Rank 0] step:5241/10000 train_time:229317ms step_avg:43.75ms +[2025-09-05 23:09:42] [Rank 0] step:5241/10000 train_time:229317ms step_avg:43.75ms +[2025-09-05 23:09:42] [Rank 0] step:5261/10000 train_time:230056ms step_avg:43.73ms +[2025-09-05 23:09:42] [Rank 0] step:5261/10000 train_time:230056ms step_avg:43.73ms +[2025-09-05 23:09:43] [Rank 0] step:5281/10000 train_time:230795ms step_avg:43.70ms +[2025-09-05 23:09:43] [Rank 0] step:5281/10000 train_time:230795ms step_avg:43.70ms +[2025-09-05 23:09:44] [Rank 0] step:5301/10000 train_time:231533ms step_avg:43.68ms +[2025-09-05 23:09:44] [Rank 0] step:5301/10000 train_time:231533ms step_avg:43.68ms +[2025-09-05 23:09:45] [Rank 0] step:5321/10000 train_time:232272ms step_avg:43.65ms +[2025-09-05 23:09:45] [Rank 0] step:5321/10000 train_time:232272ms step_avg:43.65ms +[2025-09-05 23:09:45] [Rank 0] step:5341/10000 train_time:233011ms step_avg:43.63ms +[2025-09-05 23:09:45] [Rank 0] step:5341/10000 train_time:233011ms step_avg:43.63ms +[2025-09-05 23:09:46] [Rank 0] step:5361/10000 train_time:233749ms step_avg:43.60ms +[2025-09-05 23:09:46] [Rank 0] step:5361/10000 train_time:233749ms step_avg:43.60ms +[2025-09-05 23:09:47] [Rank 0] step:5381/10000 train_time:234488ms step_avg:43.58ms +[2025-09-05 23:09:47] [Rank 0] step:5381/10000 train_time:234488ms step_avg:43.58ms +[2025-09-05 23:09:48] [Rank 0] step:5401/10000 train_time:235226ms step_avg:43.55ms +[2025-09-05 23:09:48] [Rank 0] step:5401/10000 train_time:235226ms step_avg:43.55ms +[2025-09-05 23:09:48] [Rank 0] step:5421/10000 train_time:235964ms step_avg:43.53ms +[2025-09-05 23:09:48] [Rank 0] step:5421/10000 train_time:235964ms step_avg:43.53ms +[2025-09-05 23:09:49] [Rank 0] step:5441/10000 train_time:236702ms step_avg:43.50ms +[2025-09-05 23:09:49] [Rank 0] step:5441/10000 train_time:236702ms step_avg:43.50ms +[2025-09-05 23:09:50] [Rank 0] step:5461/10000 train_time:237441ms step_avg:43.48ms +[2025-09-05 23:09:50] [Rank 0] step:5461/10000 train_time:237441ms step_avg:43.48ms +[2025-09-05 23:09:51] [Rank 0] step:5481/10000 train_time:238179ms step_avg:43.46ms +[2025-09-05 23:09:51] [Rank 0] step:5481/10000 train_time:238179ms step_avg:43.46ms +[2025-09-05 23:09:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 23:09:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 23:09:52] [Rank 0] PRINT: step:5500/10000 train_loss:2.1762 val_loss:2.1502 train_time:238998ms step_avg:43.45ms
+[2025-09-05 23:09:52] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:09:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:11:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:11:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:11:12] [Rank 0] Total Loss: 4.5741
+[2025-09-05 23:11:12] [Rank 0] Total FTA (Unweighted): 0.3025
+[2025-09-05 23:11:12] [Rank 0] Total FTA (Weighted): 0.3025
+[2025-09-05 23:11:12] [Rank 0] Group 0 Loss: 3.3737
+[2025-09-05 23:11:12] [Rank 0] Group 1 Loss: 3.0897
+[2025-09-05 23:11:12] [Rank 0] Group 2 Loss: 3.0995
+[2025-09-05 23:11:12] [Rank 0] Group 3 Loss: 3.5237
+[2025-09-05 23:11:12] [Rank 0] Group 4 Loss: 3.8876
+[2025-09-05 23:11:12] [Rank 0] Group 5 Loss: 4.3757
+[2025-09-05 23:11:12] [Rank 0] Group 6 Loss: 4.6599
+[2025-09-05 23:11:12] [Rank 0] Group 7 Loss: 4.8270
+[2025-09-05 23:11:12] [Rank 0] Group 8 Loss: 5.1500
+[2025-09-05 23:11:12] [Rank 0] Group 9 Loss: 5.2747
+[2025-09-05 23:11:12] [Rank 0] Group 10 Loss: 5.3285
+[2025-09-05 23:11:12] [Rank 0] Group 11 Loss: 5.3391
+[2025-09-05 23:11:12] [Rank 0] Group 12 Loss: 5.2605
+[2025-09-05 23:11:12] [Rank 0] Group 13 Loss: 5.3268
+[2025-09-05 23:11:12] [Rank 0] Group 14 Loss: 5.3639
+[2025-09-05 23:11:12] [Rank 0] Group 15 Loss: 5.3053
+[2025-09-05 23:11:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:11:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:11:12] [Rank 0] Group 2 FTA: 0.4700
+[2025-09-05 23:11:12] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:11:12] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:11:12] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 23:11:12] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:11:12] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 23:11:12] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 23:11:12] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 23:11:12] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 23:11:12] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-05 23:11:12] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 23:11:12] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 23:11:12] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 23:11:12] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 23:11:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:11:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:11:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:11:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:11:14] [Rank 0] step:5501/10000 train_time:239007ms step_avg:43.45ms
+[2025-09-05 23:11:15] [Rank 0] step:5521/10000 train_time:239676ms step_avg:43.41ms
+[2025-09-05 23:11:15] [Rank 0] step:5541/10000 train_time:240415ms step_avg:43.39ms
+[2025-09-05 23:11:16] [Rank 0] step:5561/10000 train_time:241154ms step_avg:43.37ms
+[2025-09-05 23:11:17] [Rank 0] step:5581/10000 train_time:241896ms step_avg:43.34ms
+[2025-09-05 23:11:18] [Rank 0] step:5601/10000 train_time:242635ms step_avg:43.32ms
+[2025-09-05 23:11:18] [Rank 0] step:5621/10000 train_time:243374ms step_avg:43.30ms
+[2025-09-05 23:11:20] [Rank 0] step:5641/10000 train_time:244740ms step_avg:43.39ms
+[2025-09-05 23:11:20] [Rank 0] step:5661/10000 train_time:245480ms step_avg:43.36ms
+[2025-09-05 23:11:21] [Rank 0] step:5681/10000 train_time:246218ms step_avg:43.34ms
+[2025-09-05 23:11:22] [Rank 0] step:5701/10000 train_time:246999ms step_avg:43.33ms
+[2025-09-05 23:11:23] [Rank 0] step:5721/10000 train_time:247738ms step_avg:43.30ms
+[2025-09-05 23:11:23] [Rank 0] step:5741/10000 train_time:248477ms step_avg:43.28ms
+[2025-09-05 23:11:24] [Rank 0] step:5761/10000 train_time:249214ms step_avg:43.26ms
+[2025-09-05 23:11:25] [Rank 0] step:5781/10000 train_time:249953ms step_avg:43.24ms
+[2025-09-05 23:11:26] [Rank 0] step:5801/10000 train_time:250690ms step_avg:43.21ms
+[2025-09-05 23:11:26] [Rank 0] step:5821/10000 train_time:251545ms step_avg:43.21ms
+[2025-09-05 23:11:27] [Rank 0] step:5841/10000 train_time:252283ms step_avg:43.19ms
+[2025-09-05 23:11:28] [Rank 0] step:5861/10000 train_time:253023ms step_avg:43.17ms
+[2025-09-05 23:11:29] [Rank 0] step:5881/10000 train_time:253881ms step_avg:43.17ms
+[2025-09-05 23:11:30] [Rank 0] step:5901/10000 train_time:254620ms step_avg:43.15ms
+[2025-09-05 23:11:30] [Rank 0] step:5921/10000 train_time:255358ms step_avg:43.13ms
+[2025-09-05 23:11:31] [Rank 0] step:5941/10000 train_time:256095ms step_avg:43.11ms
+[2025-09-05 23:11:32] [Rank 0] step:5961/10000 train_time:256833ms step_avg:43.09ms
+[2025-09-05 23:11:33] [Rank 0] step:5981/10000 train_time:257570ms step_avg:43.06ms
+[2025-09-05 23:11:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:11:34] [Rank 0] PRINT: step:6000/10000 train_loss:2.1456 val_loss:2.1231 train_time:258388ms step_avg:43.06ms
+[2025-09-05 23:11:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:11:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:12:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:12:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:12:55] [Rank 0] Total Loss: 4.5909
+[2025-09-05 23:12:55] [Rank 0] Total FTA (Unweighted): 0.2994
+[2025-09-05 23:12:55] [Rank 0] Total FTA (Weighted): 0.2994
+[2025-09-05 23:12:55] [Rank 0] Group 0 Loss: 3.2952
+[2025-09-05 23:12:55] [Rank 0] Group 1 Loss: 3.1090
+[2025-09-05 23:12:55] [Rank 0] Group 2 Loss: 3.1504
+[2025-09-05 23:12:55] [Rank 0] Group 3 Loss: 3.6578
+[2025-09-05 23:12:55] [Rank 0] Group 4 Loss: 3.9343
+[2025-09-05 23:12:55] [Rank 0] Group 5 Loss: 4.3718
+[2025-09-05 23:12:55] [Rank 0] Group 6 Loss: 4.6638
+[2025-09-05 23:12:55] [Rank 0] Group 7 Loss: 4.8206
+[2025-09-05 23:12:55] [Rank 0] Group 8 Loss: 5.1544
+[2025-09-05 23:12:55] [Rank 0] Group 9 Loss: 5.2827
+[2025-09-05 23:12:55] [Rank 0] Group 10 Loss: 5.3513
+[2025-09-05 23:12:55] [Rank 0] Group 11 Loss: 5.3615
+[2025-09-05 23:12:55] [Rank 0] Group 12 Loss: 5.2740
+[2025-09-05 23:12:55] [Rank 0] Group 13 Loss: 5.3241
+[2025-09-05 23:12:55] [Rank 0] Group 14 Loss: 5.3777
+[2025-09-05 23:12:55] [Rank 0] Group 15 Loss: 5.3257
+[2025-09-05 23:12:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:12:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:12:55] [Rank 0] Group 2 FTA: 0.3900
+[2025-09-05 23:12:55] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:12:55] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:12:55] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 23:12:55] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:12:55] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 23:12:55] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 23:12:55] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 23:12:55] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 23:12:55] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 23:12:55] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 23:12:55] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 23:12:55] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 23:12:55] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 23:12:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:12:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:12:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:12:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:12:56] [Rank 0] step:6001/10000 train_time:258397ms step_avg:43.06ms
+[2025-09-05 23:12:57] [Rank 0] step:6021/10000 train_time:259272ms step_avg:43.06ms
+[2025-09-05 23:12:58] [Rank 0] step:6041/10000 train_time:260011ms step_avg:43.04ms
+[2025-09-05 23:12:59] [Rank 0] step:6061/10000 train_time:260749ms step_avg:43.02ms
+[2025-09-05 23:12:59] [Rank 0] step:6081/10000 train_time:261486ms step_avg:43.00ms
+[2025-09-05 23:13:00] [Rank 0] step:6101/10000 train_time:262224ms step_avg:42.98ms
+[2025-09-05 23:13:01] [Rank 0] step:6121/10000 train_time:262962ms step_avg:42.96ms
+[2025-09-05 23:13:02] [Rank 0] step:6141/10000 train_time:263702ms step_avg:42.94ms
+[2025-09-05 23:13:02] [Rank 0] step:6161/10000 train_time:264441ms step_avg:42.92ms
+[2025-09-05 23:13:03] [Rank 0] step:6181/10000 train_time:265180ms step_avg:42.90ms
+[2025-09-05 23:13:04] [Rank 0] step:6201/10000 train_time:265919ms step_avg:42.88ms
+[2025-09-05 23:13:05] [Rank 0] step:6221/10000 train_time:266658ms step_avg:42.86ms
+[2025-09-05 23:13:05] [Rank 0] step:6241/10000 train_time:267398ms step_avg:42.85ms
+[2025-09-05 23:13:06] [Rank 0] step:6261/10000 train_time:268137ms step_avg:42.83ms
+[2025-09-05 23:13:07] [Rank 0] step:6281/10000 train_time:268877ms step_avg:42.81ms
+[2025-09-05 23:13:08] [Rank 0] step:6301/10000 train_time:269615ms step_avg:42.79ms
+[2025-09-05 23:13:08] [Rank 0] step:6321/10000 train_time:270354ms step_avg:42.77ms
+[2025-09-05 23:13:09] [Rank 0] step:6341/10000 train_time:271093ms step_avg:42.75ms
+[2025-09-05 23:13:10] [Rank 0] step:6361/10000 train_time:271834ms step_avg:42.73ms
+[2025-09-05 23:13:10] [Rank 0] step:6381/10000 train_time:272573ms step_avg:42.72ms
+[2025-09-05 23:13:11] [Rank 0] step:6401/10000 train_time:273312ms step_avg:42.70ms
+[2025-09-05 23:13:12] [Rank 0] step:6421/10000 train_time:274051ms step_avg:42.68ms
+[2025-09-05 23:13:13] [Rank 0] step:6441/10000 train_time:274790ms step_avg:42.66ms
+[2025-09-05 23:13:13] [Rank 0] step:6461/10000 train_time:275529ms step_avg:42.64ms
+[2025-09-05 23:13:14] [Rank 0] step:6481/10000 train_time:276266ms step_avg:42.63ms
+[2025-09-05 23:13:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:13:15] [Rank 0] PRINT: step:6500/10000 train_loss:2.1202 val_loss:2.0991 train_time:277084ms step_avg:42.63ms
+[2025-09-05 23:13:15] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:13:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:14:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:14:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:14:37] [Rank 0] Total Loss: 4.5847
+[2025-09-05 23:14:37] [Rank 0] Total FTA (Unweighted): 0.3175
+[2025-09-05 23:14:37] [Rank 0] Total FTA (Weighted): 0.3175
+[2025-09-05 23:14:37] [Rank 0] Group 0 Loss: 3.3640
+[2025-09-05 23:14:37] [Rank 0] Group 1 Loss: 3.1059
+[2025-09-05 23:14:37] [Rank 0] Group 2 Loss: 3.1380
+[2025-09-05 23:14:37] [Rank 0] Group 3 Loss: 3.6305
+[2025-09-05 23:14:37] [Rank 0] Group 4 Loss: 3.9225
+[2025-09-05 23:14:37] [Rank 0] Group 5 Loss: 4.3526
+[2025-09-05 23:14:37] [Rank 0] Group 6 Loss: 4.6545
+[2025-09-05 23:14:37] [Rank 0] Group 7 Loss: 4.8090
+[2025-09-05 23:14:37] [Rank 0] Group 8 Loss: 5.1475
+[2025-09-05 23:14:37] [Rank 0] Group 9 Loss: 5.2684
+[2025-09-05 23:14:37] [Rank 0] Group 10 Loss: 5.3336
+[2025-09-05 23:14:37] [Rank 0] Group 11 Loss: 5.3508
+[2025-09-05 23:14:37] [Rank 0] Group 12 Loss: 5.2714
+[2025-09-05 23:14:37] [Rank 0] Group 13 Loss: 5.3251
+[2025-09-05 23:14:37] [Rank 0] Group 14 Loss: 5.3640
+[2025-09-05 23:14:37] [Rank 0] Group 15 Loss: 5.3179
+[2025-09-05 23:14:37] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:14:37] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:14:37] [Rank 0] Group 2 FTA: 0.5800
+[2025-09-05 23:14:37] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:14:37] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:14:37] [Rank 0] Group 5 FTA: 0.2800
+[2025-09-05 23:14:37] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:14:37] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 23:14:37] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 23:14:37] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 23:14:37] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 23:14:37] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 23:14:37] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 23:14:37] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 23:14:37] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 23:14:37] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 23:14:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:14:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:14:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:14:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:14:39] [Rank 0] step:6501/10000 train_time:277093ms step_avg:42.62ms
+[2025-09-05 23:14:39] [Rank 0] step:6521/10000 train_time:277770ms step_avg:42.60ms
+[2025-09-05 23:14:40] [Rank 0] step:6541/10000 train_time:278509ms step_avg:42.58ms
+[2025-09-05 23:14:41] [Rank 0] step:6561/10000 train_time:279247ms step_avg:42.56ms
+[2025-09-05 23:14:42] [Rank 0] step:6581/10000 train_time:279986ms step_avg:42.54ms
+[2025-09-05 23:14:42] [Rank 0] step:6601/10000 train_time:280725ms step_avg:42.53ms
+[2025-09-05 23:14:43] [Rank 0] step:6621/10000 train_time:281464ms step_avg:42.51ms
+[2025-09-05 23:14:44] [Rank 0] step:6641/10000 train_time:282203ms step_avg:42.49ms
+[2025-09-05 23:14:45] [Rank 0] step:6661/10000 train_time:282942ms step_avg:42.48ms
+[2025-09-05 23:14:45] [Rank 0] step:6681/10000 train_time:283680ms step_avg:42.46ms
+[2025-09-05 23:14:46] [Rank 0] step:6701/10000 train_time:284417ms step_avg:42.44ms
+[2025-09-05 23:14:47] [Rank 0] step:6721/10000 train_time:285155ms step_avg:42.43ms
+[2025-09-05 23:14:47] [Rank 0] step:6741/10000 train_time:285893ms step_avg:42.41ms
+[2025-09-05 23:14:48] [Rank 0] step:6761/10000 train_time:286632ms step_avg:42.39ms
+[2025-09-05 23:14:49] [Rank 0] step:6781/10000 train_time:287371ms step_avg:42.38ms
+[2025-09-05 23:14:50] [Rank 0] step:6801/10000 train_time:288108ms step_avg:42.36ms
+[2025-09-05 23:14:50] [Rank 0] step:6821/10000 train_time:288846ms step_avg:42.35ms
+[2025-09-05 23:14:52] [Rank 0] step:6841/10000 train_time:290187ms step_avg:42.42ms
+[2025-09-05 23:14:53] [Rank 0] step:6861/10000 train_time:290927ms step_avg:42.40ms
+[2025-09-05 23:14:53] [Rank 0] step:6881/10000 train_time:291665ms step_avg:42.39ms
+[2025-09-05 23:14:54] [Rank 0] step:6901/10000 train_time:292404ms step_avg:42.37ms
+[2025-09-05 23:14:55] [Rank 0] step:6921/10000 train_time:293141ms step_avg:42.36ms
+[2025-09-05 23:14:55] [Rank 0] step:6941/10000 train_time:293880ms step_avg:42.34ms
+[2025-09-05 23:14:56] [Rank 0] step:6961/10000 train_time:294619ms step_avg:42.32ms
+[2025-09-05 23:14:57] [Rank 0] step:6981/10000 train_time:295357ms step_avg:42.31ms
+[2025-09-05 23:14:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:14:58] [Rank 0] PRINT: step:7000/10000 train_loss:2.0956 val_loss:2.0769 train_time:296176ms step_avg:42.31ms
+[2025-09-05 23:14:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:14:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:16:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:16:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:16:19] [Rank 0] Total Loss: 4.5682
+[2025-09-05 23:16:19] [Rank 0] Total FTA (Unweighted): 0.3281
+[2025-09-05 23:16:19] [Rank 0] Total FTA (Weighted): 0.3281
+[2025-09-05 23:16:19] [Rank 0] Group 0 Loss: 3.3070
+[2025-09-05 23:16:19] [Rank 0] Group 1 Loss: 3.1928
+[2025-09-05 23:16:19] [Rank 0] Group 2 Loss: 3.2046
+[2025-09-05 23:16:19] [Rank 0] Group 3 Loss: 3.5955
+[2025-09-05 23:16:19] [Rank 0] Group 4 Loss: 3.8992
+[2025-09-05 23:16:19] [Rank 0] Group 5 Loss: 4.3373
+[2025-09-05 23:16:19] [Rank 0] Group 6 Loss: 4.6459
+[2025-09-05 23:16:19] [Rank 0] Group 7 Loss: 4.7779
+[2025-09-05 23:16:19] [Rank 0] Group 8 Loss: 5.1152
+[2025-09-05 23:16:19] [Rank 0] Group 9 Loss: 5.2274
+[2025-09-05 23:16:19] [Rank 0] Group 10 Loss: 5.2918
+[2025-09-05 23:16:19] [Rank 0] Group 11 Loss: 5.3222
+[2025-09-05 23:16:19] [Rank 0] Group 12 Loss: 5.2568
+[2025-09-05 23:16:19] [Rank 0] Group 13 Loss: 5.2839
+[2025-09-05 23:16:19] [Rank 0] Group 14 Loss: 5.3566
+[2025-09-05 23:16:19] [Rank 0] Group 15 Loss: 5.2775
+[2025-09-05 23:16:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:16:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:16:19] [Rank 0] Group 2 FTA: 0.6400
+[2025-09-05 23:16:19] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:16:19] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:16:19] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 23:16:19] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:16:19] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 23:16:19] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 23:16:19] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 23:16:19] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 23:16:19] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 23:16:19] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 23:16:19] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 23:16:19] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 23:16:19] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 23:16:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:16:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:16:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:16:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:16:21] [Rank 0] step:7001/10000 train_time:296185ms step_avg:42.31ms
+[2025-09-05 23:16:22] [Rank 0] step:7021/10000 train_time:296855ms step_avg:42.28ms
+[2025-09-05 23:16:22] [Rank 0] step:7041/10000 train_time:297594ms step_avg:42.27ms
+[2025-09-05 23:16:23] [Rank 0] step:7061/10000 train_time:298332ms step_avg:42.25ms
+[2025-09-05 23:16:24] [Rank 0] step:7081/10000 train_time:299070ms step_avg:42.24ms
+[2025-09-05 23:16:24] [Rank 0] step:7101/10000 train_time:299808ms step_avg:42.22ms
+[2025-09-05 23:16:25] [Rank 0] step:7121/10000 train_time:300547ms step_avg:42.21ms
+[2025-09-05 23:16:26] [Rank 0] step:7141/10000 train_time:301285ms step_avg:42.19ms
+[2025-09-05 23:16:27] [Rank 0] step:7161/10000 train_time:302024ms step_avg:42.18ms
+[2025-09-05 23:16:27] [Rank 0] step:7181/10000 train_time:302763ms step_avg:42.16ms
+[2025-09-05 23:16:28] [Rank 0] step:7201/10000 train_time:303501ms step_avg:42.15ms
+[2025-09-05 23:16:29] [Rank 0] step:7221/10000 train_time:304240ms step_avg:42.13ms
+[2025-09-05 23:16:30] [Rank 0] step:7241/10000 train_time:304979ms step_avg:42.12ms
+[2025-09-05 23:16:30] [Rank 0] step:7261/10000 train_time:305718ms step_avg:42.10ms
+[2025-09-05 23:16:31] [Rank 0] step:7281/10000 train_time:306456ms step_avg:42.09ms
+[2025-09-05 23:16:32] [Rank 0] step:7301/10000 train_time:307194ms step_avg:42.08ms
+[2025-09-05 23:16:33] [Rank 0] step:7321/10000 train_time:307933ms step_avg:42.06ms
+[2025-09-05 23:16:33] [Rank 0] step:7341/10000 train_time:308671ms step_avg:42.05ms
+[2025-09-05 23:16:34] [Rank 0] step:7361/10000 train_time:309410ms step_avg:42.03ms
+[2025-09-05 23:16:35] [Rank 0] step:7381/10000 train_time:310148ms step_avg:42.02ms
+[2025-09-05 23:16:36] [Rank 0] step:7401/10000 train_time:310887ms step_avg:42.01ms
+[2025-09-05 23:16:36] [Rank 0] step:7421/10000 train_time:311625ms step_avg:41.99ms
+[2025-09-05 23:16:37] [Rank 0] step:7441/10000 train_time:312363ms step_avg:41.98ms
+[2025-09-05 23:16:38] [Rank 0] step:7461/10000 train_time:313102ms step_avg:41.97ms
+[2025-09-05 23:16:38] [Rank 0] step:7481/10000 train_time:313840ms step_avg:41.95ms
+[2025-09-05 23:16:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:16:40] [Rank 0] PRINT: step:7500/10000 train_loss:2.0770 val_loss:2.0612 train_time:314660ms step_avg:41.95ms
+[2025-09-05 23:16:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:16:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:18:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:18:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:18:01] [Rank 0] Total Loss: 4.5700
+[2025-09-05 23:18:01] [Rank 0] Total FTA (Unweighted): 0.3350
+[2025-09-05 23:18:01] [Rank 0] Total FTA (Weighted): 0.3350
+[2025-09-05 23:18:01] [Rank 0] Group 0 Loss: 3.3751
+[2025-09-05 23:18:01] [Rank 0] Group 1 Loss: 3.1971
+[2025-09-05 23:18:01] [Rank 0] Group 2 Loss: 3.2185
+[2025-09-05 23:18:01] [Rank 0] Group 3 Loss: 3.6362
+[2025-09-05 23:18:01] [Rank 0] Group 4 Loss: 3.8857
+[2025-09-05 23:18:01] [Rank 0] Group 5 Loss: 4.3235
+[2025-09-05 23:18:01] [Rank 0] Group 6 Loss: 4.6245
+[2025-09-05 23:18:01] [Rank 0] Group 7 Loss: 4.7734
+[2025-09-05 23:18:01] [Rank 0] Group 8 Loss: 5.1097
+[2025-09-05 23:18:01] [Rank 0] Group 9 Loss: 5.2210
+[2025-09-05 23:18:01] [Rank 0] Group 10 Loss: 5.3060
+[2025-09-05 23:18:01] [Rank 0] Group 11 Loss: 5.3180
+[2025-09-05 23:18:01] [Rank 0] Group 12 Loss: 5.2469
+[2025-09-05 23:18:01] [Rank 0] Group 13 Loss: 5.2834
+[2025-09-05 23:18:01] [Rank 0] Group 14 Loss: 5.3331
+[2025-09-05 23:18:01] [Rank 0] Group 15 Loss: 5.2678
+[2025-09-05 23:18:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:18:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:18:01] [Rank 0] Group 2 FTA: 0.6400
+[2025-09-05 23:18:01] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:18:01] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:18:01] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 23:18:01] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:18:01] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 23:18:01] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 23:18:01] [Rank 0] Group 9 FTA: 0.1800
+[2025-09-05 23:18:01] [Rank 0] Group 10 FTA: 0.2600
+[2025-09-05 23:18:01] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 23:18:01] [Rank 0] Group 12 FTA: 0.2100
+[2025-09-05 23:18:01] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 23:18:01] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 23:18:01] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 23:18:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:18:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:18:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:18:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:18:03] [Rank 0] step:7501/10000 train_time:314668ms step_avg:41.95ms
+[2025-09-05 23:18:03] [Rank 0] step:7521/10000 train_time:315353ms step_avg:41.93ms
+[2025-09-05 23:18:04] [Rank 0] step:7541/10000 train_time:316092ms step_avg:41.92ms
+[2025-09-05 23:18:05] [Rank 0] step:7561/10000 train_time:316830ms step_avg:41.90ms
+[2025-09-05 23:18:06] [Rank 0] step:7581/10000 train_time:317568ms step_avg:41.89ms
+[2025-09-05 23:18:06] [Rank 0] step:7601/10000 train_time:318307ms step_avg:41.88ms
+[2025-09-05 23:18:07] [Rank 0] step:7621/10000 train_time:319045ms step_avg:41.86ms
+[2025-09-05 23:18:08] [Rank 0] step:7641/10000 train_time:320007ms step_avg:41.88ms
+[2025-09-05 23:18:09] [Rank 0] step:7661/10000 train_time:321123ms step_avg:41.92ms
+[2025-09-05 23:18:10] [Rank 0] step:7681/10000 train_time:321861ms step_avg:41.90ms
+[2025-09-05 23:18:11] [Rank 0] step:7701/10000 train_time:322599ms step_avg:41.89ms
+[2025-09-05 23:18:11] [Rank 0] step:7721/10000 train_time:323338ms step_avg:41.88ms
+[2025-09-05 23:18:12] [Rank 0] step:7741/10000 train_time:324077ms step_avg:41.86ms
+[2025-09-05 23:18:13] [Rank 0] step:7761/10000 train_time:324815ms step_avg:41.85ms
+[2025-09-05 23:18:14] [Rank 0] step:7781/10000 train_time:325552ms step_avg:41.84ms
+[2025-09-05 23:18:14] [Rank 0] step:7801/10000 train_time:326291ms step_avg:41.83ms
+[2025-09-05 23:18:15] [Rank 0] step:7821/10000 train_time:327029ms step_avg:41.81ms
+[2025-09-05 23:18:16] [Rank 0] step:7841/10000 train_time:327768ms step_avg:41.80ms
+[2025-09-05 23:18:16] [Rank 0] step:7861/10000 train_time:328507ms step_avg:41.79ms
+[2025-09-05 23:18:17] [Rank 0] step:7881/10000 train_time:329246ms step_avg:41.78ms
+[2025-09-05 23:18:18] [Rank 0] step:7901/10000 train_time:329984ms step_avg:41.76ms
+[2025-09-05 23:18:19] [Rank 0] step:7921/10000 train_time:330722ms step_avg:41.75ms
+[2025-09-05 23:18:19] [Rank 0] step:7941/10000 train_time:331460ms step_avg:41.74ms
+[2025-09-05 23:18:20] [Rank 0] step:7961/10000 train_time:332199ms step_avg:41.73ms
+[2025-09-05 23:18:21] [Rank 0] step:7981/10000 train_time:332938ms step_avg:41.72ms
+[2025-09-05 23:18:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:18:22] [Rank 0] PRINT: step:8000/10000 train_loss:2.0628 val_loss:2.0471 train_time:333757ms step_avg:41.72ms +[2025-09-05 23:18:22] [Rank 0] PRINT: step:8000/10000 train_loss:2.0628 val_loss:2.0471 train_time:333757ms step_avg:41.72ms +[2025-09-05 23:18:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:18:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 23:18:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:18:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 23:19:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:19:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 23:19:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:19:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 23:19:43] [Rank 0] Total Loss: 4.5542 +[2025-09-05 23:19:43] [Rank 0] Total Loss: 4.5542 +[2025-09-05 23:19:43] [Rank 0] Total FTA (Unweighted): 0.3369 +[2025-09-05 23:19:43] [Rank 0] Total FTA (Unweighted): 0.3369 +[2025-09-05 23:19:43] [Rank 0] Total FTA (Weighted): 0.3369 +[2025-09-05 23:19:43] [Rank 0] Total FTA (Weighted): 0.3369 +[2025-09-05 23:19:43] [Rank 0] Group 0 Loss: 3.4748 +[2025-09-05 23:19:43] [Rank 0] Group 0 Loss: 3.4748 +[2025-09-05 23:19:43] [Rank 0] Group 1 Loss: 3.1959 +[2025-09-05 23:19:43] [Rank 0] Group 1 Loss: 3.1959 +[2025-09-05 23:19:43] [Rank 0] Group 2 Loss: 3.1704 +[2025-09-05 23:19:43] [Rank 0] Group 2 Loss: 3.1704 +[2025-09-05 23:19:43] [Rank 0] Group 3 Loss: 3.6332 +[2025-09-05 23:19:43] [Rank 0] Group 3 Loss: 3.6332 +[2025-09-05 23:19:43] [Rank 0] Group 4 Loss: 3.8759 +[2025-09-05 23:19:43] [Rank 0] Group 4 Loss: 3.8759 +[2025-09-05 23:19:43] [Rank 0] Group 5 Loss: 4.2909 +[2025-09-05 23:19:43] [Rank 0] Group 5 Loss: 4.2909 +[2025-09-05 23:19:43] [Rank 0] Group 6 Loss: 4.6070 +[2025-09-05 23:19:43] [Rank 0] Group 6 Loss: 4.6070 +[2025-09-05 23:19:43] [Rank 0] Group 7 Loss: 4.7455 +[2025-09-05 23:19:43] [Rank 0] Group 7 Loss: 4.7455 +[2025-09-05 23:19:43] [Rank 0] Group 8 Loss: 5.0739 +[2025-09-05 23:19:43] [Rank 0] Group 8 Loss: 5.0739 +[2025-09-05 23:19:43] [Rank 0] Group 9 Loss: 5.1989 +[2025-09-05 23:19:43] [Rank 0] Group 9 Loss: 5.1989 +[2025-09-05 23:19:43] [Rank 0] Group 10 Loss: 5.2777 +[2025-09-05 23:19:43] [Rank 0] Group 10 Loss: 5.2777 +[2025-09-05 23:19:43] [Rank 0] Group 11 Loss: 5.3024 +[2025-09-05 23:19:43] [Rank 0] Group 11 Loss: 5.3024 +[2025-09-05 23:19:43] [Rank 0] Group 12 Loss: 5.2176 +[2025-09-05 23:19:43] [Rank 0] Group 12 Loss: 5.2176 +[2025-09-05 23:19:44] [Rank 0] Group 13 Loss: 5.2513 +[2025-09-05 23:19:44] [Rank 0] Group 13 Loss: 5.2513 +[2025-09-05 23:19:44] [Rank 0] Group 14 Loss: 5.3171 +[2025-09-05 23:19:44] [Rank 0] Group 14 Loss: 5.3171 +[2025-09-05 23:19:44] [Rank 0] Group 15 Loss: 5.2352 +[2025-09-05 23:19:44] [Rank 0] Group 15 Loss: 5.2352 +[2025-09-05 23:19:44] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:19:44] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 23:19:44] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:19:44] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 23:19:44] [Rank 0] Group 2 FTA: 0.6400 +[2025-09-05 23:19:44] [Rank 0] Group 2 FTA: 0.6400 +[2025-09-05 23:19:44] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:19:44] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 23:19:44] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:19:44] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 23:19:44] [Rank 0] Group 5 FTA: 0.3000 +[2025-09-05 23:19:44] [Rank 0] Group 5 FTA: 
0.3000 +[2025-09-05 23:19:44] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:19:44] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 23:19:44] [Rank 0] Group 7 FTA: 0.1700 +[2025-09-05 23:19:44] [Rank 0] Group 7 FTA: 0.1700 +[2025-09-05 23:19:44] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 23:19:44] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 23:19:44] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 23:19:44] [Rank 0] Group 9 FTA: 0.1700 +[2025-09-05 23:19:44] [Rank 0] Group 10 FTA: 0.2400 +[2025-09-05 23:19:44] [Rank 0] Group 10 FTA: 0.2400 +[2025-09-05 23:19:44] [Rank 0] Group 11 FTA: 0.2200 +[2025-09-05 23:19:44] [Rank 0] Group 11 FTA: 0.2200 +[2025-09-05 23:19:44] [Rank 0] Group 12 FTA: 0.2100 +[2025-09-05 23:19:44] [Rank 0] Group 12 FTA: 0.2100 +[2025-09-05 23:19:44] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-05 23:19:44] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-05 23:19:44] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 23:19:44] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 23:19:44] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-05 23:19:44] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-05 23:19:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:19:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png +[2025-09-05 23:19:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:19:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png +[2025-09-05 23:19:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:19:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png +[2025-09-05 23:19:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:19:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png +[2025-09-05 23:19:45] [Rank 0] step:8001/10000 train_time:333768ms step_avg:41.72ms +[2025-09-05 23:19:45] [Rank 0] step:8001/10000 train_time:333768ms step_avg:41.72ms +[2025-09-05 23:19:46] [Rank 0] step:8021/10000 train_time:335063ms step_avg:41.77ms +[2025-09-05 23:19:46] [Rank 0] step:8021/10000 train_time:335063ms step_avg:41.77ms +[2025-09-05 23:19:47] [Rank 0] step:8041/10000 train_time:335803ms step_avg:41.76ms +[2025-09-05 23:19:47] [Rank 0] step:8041/10000 train_time:335803ms step_avg:41.76ms +[2025-09-05 23:19:48] [Rank 0] step:8061/10000 train_time:336542ms step_avg:41.75ms +[2025-09-05 23:19:48] [Rank 0] step:8061/10000 train_time:336542ms step_avg:41.75ms +[2025-09-05 23:19:49] [Rank 0] step:8081/10000 train_time:337280ms step_avg:41.74ms +[2025-09-05 23:19:49] [Rank 0] step:8081/10000 train_time:337280ms step_avg:41.74ms +[2025-09-05 23:19:49] [Rank 0] step:8101/10000 train_time:338019ms step_avg:41.73ms +[2025-09-05 23:19:49] [Rank 0] step:8101/10000 train_time:338019ms step_avg:41.73ms +[2025-09-05 23:19:50] [Rank 0] step:8121/10000 train_time:338757ms step_avg:41.71ms +[2025-09-05 23:19:50] 
+[2025-09-05 23:19:51] [Rank 0] step:8141/10000 train_time:339495ms step_avg:41.70ms
+[2025-09-05 23:19:52] [Rank 0] step:8161/10000 train_time:340349ms step_avg:41.70ms
+[2025-09-05 23:19:52] [Rank 0] step:8181/10000 train_time:341086ms step_avg:41.69ms
+[2025-09-05 23:19:53] [Rank 0] step:8201/10000 train_time:341826ms step_avg:41.68ms
+[2025-09-05 23:19:54] [Rank 0] step:8221/10000 train_time:342564ms step_avg:41.67ms
+[2025-09-05 23:19:55] [Rank 0] step:8241/10000 train_time:343437ms step_avg:41.67ms
+[2025-09-05 23:19:56] [Rank 0] step:8261/10000 train_time:344176ms step_avg:41.66ms
+[2025-09-05 23:19:56] [Rank 0] step:8281/10000 train_time:344914ms step_avg:41.65ms
+[2025-09-05 23:19:57] [Rank 0] step:8301/10000 train_time:345653ms step_avg:41.64ms
+[2025-09-05 23:19:58] [Rank 0] step:8321/10000 train_time:346392ms step_avg:41.63ms
+[2025-09-05 23:19:59] [Rank 0] step:8341/10000 train_time:347131ms step_avg:41.62ms
+[2025-09-05 23:19:59] [Rank 0] step:8361/10000 train_time:347868ms step_avg:41.61ms
+[2025-09-05 23:20:00] [Rank 0] step:8381/10000 train_time:348607ms step_avg:41.59ms
+[2025-09-05 23:20:01] [Rank 0] step:8401/10000 train_time:349346ms step_avg:41.58ms
+[2025-09-05 23:20:01] [Rank 0] step:8421/10000 train_time:350103ms step_avg:41.58ms
+[2025-09-05 23:20:02] [Rank 0] step:8441/10000 train_time:350843ms step_avg:41.56ms
+[2025-09-05 23:20:03] [Rank 0] step:8461/10000 train_time:351582ms step_avg:41.55ms
+[2025-09-05 23:20:04] [Rank 0] step:8481/10000 train_time:352321ms step_avg:41.54ms
+[2025-09-05 23:20:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
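A note on the warning just above: the figures come straight from the config (val_tokens=491520) and the warning text itself (val_batch_size=65536), and 491520 / 65536 = 7.5. A minimal sketch of the arithmetic, assuming the validation loop floor-divides tokens into full batches (the loop itself is not shown in this excerpt):

val_tokens = 491520        # "val_tokens" from the run's hyperparameters
val_batch_size = 65536     # from the warning message
print(val_tokens / val_batch_size)       # 7.5 -> not an integer, so the warning fires
print(val_tokens // val_batch_size)      # 7 full validation batches
print(val_tokens - 7 * val_batch_size)   # 32768 tokens that "might be missed"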
+[2025-09-05 23:20:05] [Rank 0] PRINT: step:8500/10000 train_loss:2.0500 val_loss:2.0350 train_time:353141ms step_avg:41.55ms
+[2025-09-05 23:20:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:20:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:21:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:21:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:21:26] [Rank 0] Total Loss: 4.5570
+[2025-09-05 23:21:26] [Rank 0] Total FTA (Unweighted): 0.3406
+[2025-09-05 23:21:26] [Rank 0] Total FTA (Weighted): 0.3406
+[2025-09-05 23:21:26] [Rank 0] Group 0 Loss: 3.4599
+[2025-09-05 23:21:26] [Rank 0] Group 1 Loss: 3.2026
+[2025-09-05 23:21:26] [Rank 0] Group 2 Loss: 3.2018
+[2025-09-05 23:21:26] [Rank 0] Group 3 Loss: 3.6177
+[2025-09-05 23:21:26] [Rank 0] Group 4 Loss: 3.8667
+[2025-09-05 23:21:26] [Rank 0] Group 5 Loss: 4.3127
+[2025-09-05 23:21:26] [Rank 0] Group 6 Loss: 4.6151
+[2025-09-05 23:21:26] [Rank 0] Group 7 Loss: 4.7567
+[2025-09-05 23:21:26] [Rank 0] Group 8 Loss: 5.0772
+[2025-09-05 23:21:26] [Rank 0] Group 9 Loss: 5.1914
+[2025-09-05 23:21:26] [Rank 0] Group 10 Loss: 5.2748
+[2025-09-05 23:21:26] [Rank 0] Group 11 Loss: 5.3059
+[2025-09-05 23:21:26] [Rank 0] Group 12 Loss: 5.2154
+[2025-09-05 23:21:26] [Rank 0] Group 13 Loss: 5.2550
+[2025-09-05 23:21:26] [Rank 0] Group 14 Loss: 5.3227
+[2025-09-05 23:21:26] [Rank 0] Group 15 Loss: 5.2363
+[2025-09-05 23:21:26] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:21:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:21:26] [Rank 0] Group 2 FTA: 0.7000
+[2025-09-05 23:21:26] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:21:26] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:21:26] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 23:21:26] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:21:26] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 23:21:26] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 23:21:26] [Rank 0] Group 9 FTA: 0.1800
+[2025-09-05 23:21:26] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 23:21:26] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 23:21:26] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 23:21:26] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-05 23:21:26] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 23:21:26] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 23:21:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:21:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:21:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:21:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:21:28] [Rank 0] step:8501/10000 train_time:353150ms step_avg:41.54ms
+[2025-09-05 23:21:28] [Rank 0] step:8521/10000 train_time:353817ms step_avg:41.52ms
+[2025-09-05 23:21:29] [Rank 0] step:8541/10000 train_time:354555ms step_avg:41.51ms
+[2025-09-05 23:21:30] [Rank 0] step:8561/10000 train_time:355294ms step_avg:41.50ms
+[2025-09-05 23:21:30] [Rank 0] step:8581/10000 train_time:356033ms step_avg:41.49ms
+[2025-09-05 23:21:31] [Rank 0] step:8601/10000 train_time:356771ms step_avg:41.48ms
+[2025-09-05 23:21:32] [Rank 0] step:8621/10000 train_time:357509ms step_avg:41.47ms
+[2025-09-05 23:21:33] [Rank 0] step:8641/10000 train_time:358247ms step_avg:41.46ms
+[2025-09-05 23:21:33] [Rank 0] step:8661/10000 train_time:358986ms step_avg:41.45ms
+[2025-09-05 23:21:34] [Rank 0] step:8681/10000 train_time:359723ms step_avg:41.44ms
+[2025-09-05 23:21:35] [Rank 0] step:8701/10000 train_time:360460ms step_avg:41.43ms
+[2025-09-05 23:21:36] [Rank 0] step:8721/10000 train_time:361196ms step_avg:41.42ms
+[2025-09-05 23:21:36] [Rank 0] step:8741/10000 train_time:361934ms step_avg:41.41ms
+[2025-09-05 23:21:37] [Rank 0] step:8761/10000 train_time:362671ms step_avg:41.40ms
+[2025-09-05 23:21:38] [Rank 0] step:8781/10000 train_time:363410ms step_avg:41.39ms
+[2025-09-05 23:21:39] [Rank 0] step:8801/10000 train_time:364148ms step_avg:41.38ms
+[2025-09-05 23:21:39] [Rank 0] step:8821/10000 train_time:364887ms step_avg:41.37ms
+[2025-09-05 23:21:41] [Rank 0] step:8841/10000 train_time:366250ms step_avg:41.43ms
+[2025-09-05 23:21:41] [Rank 0] step:8861/10000 train_time:366988ms step_avg:41.42ms
+[2025-09-05 23:21:42] [Rank 0] step:8881/10000 train_time:367726ms step_avg:41.41ms
+[2025-09-05 23:21:43] [Rank 0] step:8901/10000 train_time:368464ms step_avg:41.40ms
+[2025-09-05 23:21:44] [Rank 0] step:8921/10000 train_time:369202ms step_avg:41.39ms
+[2025-09-05 23:21:44] [Rank 0] step:8941/10000 train_time:369943ms step_avg:41.38ms
+[2025-09-05 23:21:45] [Rank 0] step:8961/10000 train_time:370681ms step_avg:41.37ms
+[2025-09-05 23:21:46] [Rank 0] step:8981/10000 train_time:371420ms step_avg:41.36ms
+[2025-09-05 23:21:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
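For reading these step lines: step_avg is simply the cumulative train_time divided by the step count, as the logged values confirm. A quick check against the step:9000 summary that follows:

train_time_ms = 372238   # from the step:9000 line below
step = 9000
print(round(train_time_ms / step, 2))  # 41.36 -> matches the logged step_avg:41.36ms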
+[2025-09-05 23:21:47] [Rank 0] PRINT: step:9000/10000 train_loss:2.0377 val_loss:2.0244 train_time:372238ms step_avg:41.36ms
+[2025-09-05 23:21:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:21:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:23:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:23:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:23:09] [Rank 0] Total Loss: 4.5645
+[2025-09-05 23:23:09] [Rank 0] Total FTA (Unweighted): 0.3400
+[2025-09-05 23:23:09] [Rank 0] Total FTA (Weighted): 0.3400
+[2025-09-05 23:23:09] [Rank 0] Group 0 Loss: 3.4633
+[2025-09-05 23:23:09] [Rank 0] Group 1 Loss: 3.2190
+[2025-09-05 23:23:09] [Rank 0] Group 2 Loss: 3.2334
+[2025-09-05 23:23:09] [Rank 0] Group 3 Loss: 3.6404
+[2025-09-05 23:23:09] [Rank 0] Group 4 Loss: 3.8627
+[2025-09-05 23:23:09] [Rank 0] Group 5 Loss: 4.3217
+[2025-09-05 23:23:09] [Rank 0] Group 6 Loss: 4.6215
+[2025-09-05 23:23:09] [Rank 0] Group 7 Loss: 4.7561
+[2025-09-05 23:23:09] [Rank 0] Group 8 Loss: 5.0775
+[2025-09-05 23:23:09] [Rank 0] Group 9 Loss: 5.1875
+[2025-09-05 23:23:09] [Rank 0] Group 10 Loss: 5.2825
+[2025-09-05 23:23:09] [Rank 0] Group 11 Loss: 5.3214
+[2025-09-05 23:23:09] [Rank 0] Group 12 Loss: 5.2132
+[2025-09-05 23:23:09] [Rank 0] Group 13 Loss: 5.2536
+[2025-09-05 23:23:09] [Rank 0] Group 14 Loss: 5.3230
+[2025-09-05 23:23:09] [Rank 0] Group 15 Loss: 5.2556
+[2025-09-05 23:23:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:23:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:23:09] [Rank 0] Group 2 FTA: 0.7000
+[2025-09-05 23:23:09] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 23:23:09] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:23:09] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 23:23:09] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:23:09] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 23:23:09] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 23:23:09] [Rank 0] Group 9 FTA: 0.1800
+[2025-09-05 23:23:09] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-05 23:23:09] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 23:23:09] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 23:23:09] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 23:23:09] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 23:23:09] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 23:23:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:23:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:23:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:23:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:23:11] [Rank 0] step:9001/10000 train_time:372247ms step_avg:41.36ms
+[2025-09-05 23:23:12] [Rank 0] step:9021/10000 train_time:372914ms step_avg:41.34ms
+[2025-09-05 23:23:12] [Rank 0] step:9041/10000 train_time:373652ms step_avg:41.33ms
+[2025-09-05 23:23:13] [Rank 0] step:9061/10000 train_time:374390ms step_avg:41.32ms
+[2025-09-05 23:23:14] [Rank 0] step:9081/10000 train_time:375128ms step_avg:41.31ms
+[2025-09-05 23:23:15] [Rank 0] step:9101/10000 train_time:375866ms step_avg:41.30ms
+[2025-09-05 23:23:15] [Rank 0] step:9121/10000 train_time:376605ms step_avg:41.29ms
+[2025-09-05 23:23:16] [Rank 0] step:9141/10000 train_time:377342ms step_avg:41.28ms
+[2025-09-05 23:23:17] [Rank 0] step:9161/10000 train_time:378079ms step_avg:41.27ms
+[2025-09-05 23:23:17] [Rank 0] step:9181/10000 train_time:378816ms step_avg:41.26ms
+[2025-09-05 23:23:18] [Rank 0] step:9201/10000 train_time:379554ms step_avg:41.25ms
+[2025-09-05 23:23:19] [Rank 0] step:9221/10000 train_time:380291ms step_avg:41.24ms
+[2025-09-05 23:23:20] [Rank 0] step:9241/10000 train_time:381029ms step_avg:41.23ms
+[2025-09-05 23:23:20] [Rank 0] step:9261/10000 train_time:381766ms step_avg:41.22ms
+[2025-09-05 23:23:21] [Rank 0] step:9281/10000 train_time:382505ms step_avg:41.21ms
+[2025-09-05 23:23:22] [Rank 0] step:9301/10000 train_time:383286ms step_avg:41.21ms
+[2025-09-05 23:23:23] [Rank 0] step:9321/10000 train_time:384025ms step_avg:41.20ms
+[2025-09-05 23:23:23] [Rank 0] step:9341/10000 train_time:384764ms step_avg:41.19ms
+[2025-09-05 23:23:24] [Rank 0] step:9361/10000 train_time:385502ms step_avg:41.18ms
+[2025-09-05 23:23:25] [Rank 0] step:9381/10000 train_time:386240ms step_avg:41.17ms
+[2025-09-05 23:23:26] [Rank 0] step:9401/10000 train_time:386979ms step_avg:41.16ms
+[2025-09-05 23:23:26] [Rank 0] step:9421/10000 train_time:387716ms step_avg:41.15ms
+[2025-09-05 23:23:27] [Rank 0] step:9441/10000 train_time:388456ms step_avg:41.15ms
+[2025-09-05 23:23:28] [Rank 0] step:9461/10000 train_time:389196ms step_avg:41.14ms
+[2025-09-05 23:23:29] [Rank 0] step:9481/10000 train_time:389935ms step_avg:41.13ms
+[2025-09-05 23:23:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:23:30] [Rank 0] PRINT: step:9500/10000 train_loss:2.0272 val_loss:2.0156 train_time:390754ms step_avg:41.13ms
+[2025-09-05 23:23:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:23:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:24:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:24:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:24:51] [Rank 0] Total Loss: 4.5531
+[2025-09-05 23:24:51] [Rank 0] Total FTA (Unweighted): 0.3512
+[2025-09-05 23:24:51] [Rank 0] Total FTA (Weighted): 0.3513
+[2025-09-05 23:24:51] [Rank 0] Group 0 Loss: 3.4190
+[2025-09-05 23:24:51] [Rank 0] Group 1 Loss: 3.2054
+[2025-09-05 23:24:51] [Rank 0] Group 2 Loss: 3.2248
+[2025-09-05 23:24:51] [Rank 0] Group 3 Loss: 3.6478
+[2025-09-05 23:24:51] [Rank 0] Group 4 Loss: 3.8569
+[2025-09-05 23:24:51] [Rank 0] Group 5 Loss: 4.3069
+[2025-09-05 23:24:51] [Rank 0] Group 6 Loss: 4.6153
+[2025-09-05 23:24:51] [Rank 0] Group 7 Loss: 4.7558
+[2025-09-05 23:24:51] [Rank 0] Group 8 Loss: 5.0727
+[2025-09-05 23:24:51] [Rank 0] Group 9 Loss: 5.1870
+[2025-09-05 23:24:51] [Rank 0] Group 10 Loss: 5.2689
+[2025-09-05 23:24:51] [Rank 0] Group 11 Loss: 5.3013
+[2025-09-05 23:24:51] [Rank 0] Group 12 Loss: 5.2037
+[2025-09-05 23:24:51] [Rank 0] Group 13 Loss: 5.2467
+[2025-09-05 23:24:51] [Rank 0] Group 14 Loss: 5.3074
+[2025-09-05 23:24:51] [Rank 0] Group 15 Loss: 5.2301
+[2025-09-05 23:24:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:24:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:24:51] [Rank 0] Group 2 FTA: 0.7000
+[2025-09-05 23:24:51] [Rank 0] Group 3 FTA: 0.2300
+[2025-09-05 23:24:51] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:24:51] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 23:24:51] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:24:51] [Rank 0] Group 7 FTA: 0.1800
+[2025-09-05 23:24:51] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 23:24:51] [Rank 0] Group 9 FTA: 0.1800
+[2025-09-05 23:24:51] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 23:24:51] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 23:24:51] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 23:24:51] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-05 23:24:51] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-05 23:24:51] [Rank 0] Group 15 FTA: 0.1800
+[2025-09-05 23:24:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:24:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:24:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:24:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
+[2025-09-05 23:24:52] [Rank 0] step:9501/10000 train_time:390763ms step_avg:41.13ms
+[2025-09-05 23:24:53] [Rank 0] step:9521/10000 train_time:391426ms step_avg:41.11ms
+[2025-09-05 23:24:54] [Rank 0] step:9541/10000 train_time:392166ms step_avg:41.10ms
+[2025-09-05 23:24:55] [Rank 0] step:9561/10000 train_time:392902ms step_avg:41.09ms
+[2025-09-05 23:24:55] [Rank 0] step:9581/10000 train_time:393641ms step_avg:41.09ms
+[2025-09-05 23:24:56] [Rank 0] step:9601/10000 train_time:394379ms step_avg:41.08ms
+[2025-09-05 23:24:57] [Rank 0] step:9621/10000 train_time:395116ms step_avg:41.07ms
+[2025-09-05 23:24:58] [Rank 0] step:9641/10000 train_time:395853ms step_avg:41.06ms
+[2025-09-05 23:24:59] [Rank 0] step:9661/10000 train_time:396866ms step_avg:41.08ms
+[2025-09-05 23:24:59] [Rank 0] step:9681/10000 train_time:397602ms step_avg:41.07ms
+[2025-09-05 23:25:00] [Rank 0] step:9701/10000 train_time:398341ms step_avg:41.06ms
+[2025-09-05 23:25:01] [Rank 0] step:9721/10000 train_time:399080ms step_avg:41.05ms
+[2025-09-05 23:25:01] [Rank 0] step:9741/10000 train_time:399818ms step_avg:41.04ms
+[2025-09-05 23:25:02] [Rank 0] step:9761/10000 train_time:400557ms step_avg:41.04ms
+[2025-09-05 23:25:03] [Rank 0] step:9781/10000 train_time:401296ms step_avg:41.03ms
+[2025-09-05 23:25:04] [Rank 0] step:9801/10000 train_time:402035ms step_avg:41.02ms
+[2025-09-05 23:25:04] [Rank 0] step:9821/10000 train_time:402774ms step_avg:41.01ms
+[2025-09-05 23:25:05] [Rank 0] step:9841/10000 train_time:403512ms step_avg:41.00ms
+[2025-09-05 23:25:06] [Rank 0] step:9861/10000 train_time:404252ms step_avg:41.00ms
+[2025-09-05 23:25:07] [Rank 0] step:9881/10000 train_time:404989ms step_avg:40.99ms
+[2025-09-05 23:25:07] [Rank 0] step:9901/10000 train_time:405728ms step_avg:40.98ms
+[2025-09-05 23:25:08] [Rank 0] step:9921/10000 train_time:406577ms step_avg:40.98ms
+[2025-09-05 23:25:09] [Rank 0] step:9941/10000 train_time:407314ms step_avg:40.97ms
+[2025-09-05 23:25:10] [Rank 0] step:9961/10000 train_time:408052ms step_avg:40.96ms
+[2025-09-05 23:25:11] [Rank 0] step:9981/10000 train_time:408905ms step_avg:40.97ms
+[2025-09-05 23:25:11] [Rank 0] step:10000/10000 train_time:409606ms step_avg:40.96ms
+[2025-09-05 23:25:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 23:25:12] [Rank 0] PRINT: step:10000/10000 train_loss:2.0195 val_loss:2.0081 train_time:409730ms step_avg:40.97ms
+[2025-09-05 23:25:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 23:25:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 23:26:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 23:26:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 23:26:33] [Rank 0] Total Loss: 4.5406
+[2025-09-05 23:26:33] [Rank 0] Total FTA (Unweighted): 0.3506
+[2025-09-05 23:26:33] [Rank 0] Total FTA (Weighted): 0.3506
+[2025-09-05 23:26:33] [Rank 0] Group 0 Loss: 3.4641
+[2025-09-05 23:26:33] [Rank 0] Group 1 Loss: 3.2068
+[2025-09-05 23:26:33] [Rank 0] Group 2 Loss: 3.2286
+[2025-09-05 23:26:33] [Rank 0] Group 3 Loss: 3.6187
+[2025-09-05 23:26:33] [Rank 0] Group 4 Loss: 3.8383
+[2025-09-05 23:26:33] [Rank 0] Group 5 Loss: 4.2923
+[2025-09-05 23:26:33] [Rank 0] Group 6 Loss: 4.5894
+[2025-09-05 23:26:33] [Rank 0] Group 7 Loss: 4.7306
+[2025-09-05 23:26:33] [Rank 0] Group 8 Loss: 5.0503
+[2025-09-05 23:26:33] [Rank 0] Group 9 Loss: 5.1637
+[2025-09-05 23:26:33] [Rank 0] Group 10 Loss: 5.2500
+[2025-09-05 23:26:33] [Rank 0] Group 11 Loss: 5.2893
+[2025-09-05 23:26:33] [Rank 0] Group 12 Loss: 5.1956
+[2025-09-05 23:26:33] [Rank 0] Group 13 Loss: 5.2160
+[2025-09-05 23:26:33] [Rank 0] Group 14 Loss: 5.2959
+[2025-09-05 23:26:33] [Rank 0] Group 15 Loss: 5.2199
+[2025-09-05 23:26:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 23:26:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 23:26:33] [Rank 0] Group 2 FTA: 0.7000
+[2025-09-05 23:26:33] [Rank 0] Group 3 FTA: 0.2300
+[2025-09-05 23:26:33] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 23:26:33] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 23:26:33] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 23:26:33] [Rank 0] Group 7 FTA: 0.1700
+[2025-09-05 23:26:33] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 23:26:33] [Rank 0] Group 9 FTA: 0.1800
+[2025-09-05 23:26:33] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 23:26:33] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 23:26:33] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 23:26:33] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 23:26:33] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 23:26:33] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 23:26:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_loss_curves.png
+[2025-09-05 23:26:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/per_class_acc_curves.png
+[2025-09-05 23:26:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_loss_curve.png
+[2025-09-05 23:26:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.1_seed_46/total_acc_curve.png
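One observation on the totals in this final evaluation: the fixed eval set holds 16 groups of 100 samples each (per_group_k=100; "Fixed-eval set loaded with 1600 samples"), so the unweighted Total FTA is just the plain mean of the per-group FTAs, and a sample-count-weighted mean would coincide with it. The tiny 0.3512 vs 0.3513 gap at step 9500 suggests the weighted variant uses marginally different weights; the computation itself is outside this excerpt. A quick check against the step-10000 values above:

group_fta = [1.0000, 1.0000, 0.7000, 0.2300, 0.2500, 0.3000, 0.2900, 0.1700,
             0.2300, 0.1800, 0.2300, 0.2200, 0.2000, 0.2400, 0.2100, 0.1600]
print(round(sum(group_fta) / len(group_fta), 4))  # 0.3506 -> the logged Total FTA (Unweighted)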
+[2025-09-05 23:26:35] [Rank 0] step:10001/10000 train_time:409739ms step_avg:40.97ms
+[2025-09-05 23:26:35] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 23:26:35 2025 ---
+[2025-09-05 23:26:35] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1c48e76da516403fad4e26d8121479d5bae27cc5
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/config.json
@@ -0,0 +1,29 @@
+{
+ "cli_args": {
+ "unet": false,
+ "seed": 
42, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.2, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "c256444f-f2de-44f5-bf36-322b53de49fd", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..dad6e67d4bd278b1d2b41a1edeb8c7dbf4e98eb1 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eeb19b6c022b064503f971a7fef529858e9b3bc1c09b1340742dabaf6a1c2141 +size 414844 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..7bb98e98ca522a544a677cb97e8d11c2954eb024 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab41d561d527153ee6c57d54ac9b17544d496098c3d24196b02fe096de2f03a3 +size 436213 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..25855722d61312cf9432f59796da1c811dea63b6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54ab271b22a66f76a400273931f210ba8c27948d79f670b06803fca876096647 +size 96973 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..ede711ad38bb4ba838232d5a09671403d56d9af3 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a25a15f304ba4466833292a917473242da825ca701a5cf4839da8d4c310f02a0 +size 112385 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/training_log_c256444f-f2de-44f5-bf36-322b53de49fd.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/training_log_c256444f-f2de-44f5-bf36-322b53de49fd.txt new file mode 100644 index 0000000000000000000000000000000000000000..1b3f2d2b21a14b94971a1da9cb6e332b4f687895 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/training_log_c256444f-f2de-44f5-bf36-322b53de49fd.txt @@ -0,0 +1,5614 @@ +[2025-09-05 17:26:06] [Rank 0] PRINT: --- Script Start: Fri Sep 5 17:26:06 2025 --- +[2025-09-05 17:26:06] [Rank 0] PRINT: --- Script Start: Fri Sep 5 17:26:06 2025 --- +[2025-09-05 17:26:06] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=42, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 17:26:06] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 17:26:06] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 17:26:06] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 17:26:06] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-05 17:26:06] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-05 17:26:06] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42 +[2025-09-05 17:26:06] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42 +[2025-09-05 17:26:06] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
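+                         " 9: SGD+Momentum on ALL parameters (no Muon/Adam); 10: Muon(O Attn, MLP)/Adam(QK+V Attn); 13: Muon(W_O, W_2 MLP)/Adam(QK+V Attn, W_1 MLP); 14: Muon(W_O)/Adam(QK+V Attn, MLP); 15: Muon(W_V)/Adam(QK+O Attn, MLP); 16: Muon(QKV Attn)/Adam(W_O, MLP)."  # modes 9-16 summarized from the optimizer_mode dispatch below; 11 and 12 are unused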
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
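for device +# (RANK, LOCAL_RANK and WORLD_SIZE are read from the environment set by the launcher, e.g. torchrun; the .get defaults above fall back to rank 0 / world size 1 when they are unset.)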
+torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once +    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +#    run_id = uuid.uuid4() +#    os.makedirs(log_dir, exist_ok=True) # Create new log directory +#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +#    print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: +    # Set seed again specifically for master process for operations like dir creation, config saving +    set_seed(exp_args.seed) + +    # Construct folder name based on config and seed +    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" +    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" +    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" +    run_dir_path = base_log_dir / run_folder_name +    run_dir_path.mkdir(parents=True, exist_ok=True) +    run_dir_path_str = str(run_dir_path) + +    run_uuid = uuid.uuid4() +    logfile = run_dir_path / f"training_log_{run_uuid}.txt" +    print(f"Logging to: {logfile}") + +    # Save configuration +    config_to_save = { +        "cli_args": vars(exp_args), +        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, +        "run_uuid_for_log": str(run_uuid), +        "script_code_logged_at_start": True +    } +    config_file_path = run_dir_path / "config.json" +    with open(config_file_path, "w") as f: +        json.dump(config_to_save, f, indent=4) +    print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): +    if master_process: +        # Add timestamp and rank for better log readability +        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +        log_message = f"[{timestamp}] [Rank {rank}] {s}" + +        # Print to console if requested or if it's a specific "PRINT:" message +        if console or s.startswith("PRINT:"): +            actual_s = s[6:] if s.startswith("PRINT:") else s +            print(actual_s) # Print to stdout for master process + +        # Write each log message to the logfile exactly once +        if logfile: +            with open(logfile, "a") as f: +                f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: +    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
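+    Classes are mapped to power-law groups via generate_powerlaw_selection_counts(m_val) +    and the mean loss per group is returned. If num_samples is given, a stratified +    subsample (proportional per class_id, at least one item per class) is scored instead.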
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_qk_group +        adam_matrix_target_list = attn_vo_group + all_mlp_matrices +    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP +        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_vo_group +        adam_matrix_target_list = attn_qk_group + all_mlp_matrices +    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP +        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = all_attn_matrices +        adam_matrix_target_list = all_mlp_matrices +    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) +        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = all_mlp_matrices +        adam_matrix_target_list = all_attn_matrices +    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam +        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = [] +        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam +    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP +        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = mlp_w2_group +        adam_matrix_target_list = all_attn_matrices + mlp_w1_group +    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn +        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_vo_group + all_mlp_matrices +        adam_matrix_target_list = attn_qk_group +    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP +        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_vo_group + mlp_w2_group +        adam_matrix_target_list = attn_qk_group + mlp_w1_group +    elif current_optimizer_mode == 9: # sgd + momentum +        # This mode uses SGD with momentum for all parameters, no Muon or Adam +        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) +        all_params = list(model.parameters()) +        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument +        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) +        optimizer2 = None +        optimizers = [optimizer1] +        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) +    elif current_optimizer_mode == 10: # Muon on O Attn, MLP +        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_o_params + all_mlp_matrices +        adam_matrix_target_list = attn_v_params + attn_qk_group +    elif current_optimizer_mode == 13: +        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_o_params + mlp_w2_group +        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
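+    # (note: optimizer modes 11 and 12 are not implemented in this script; the elif chain skips from 10 to 13)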
+    elif current_optimizer_mode == 14: +        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_o_params +        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices +    elif current_optimizer_mode == 15: +        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_v_params +        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices +    elif current_optimizer_mode == 16: +        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_v_params + attn_qk_group +        adam_matrix_target_list = attn_o_params + all_mlp_matrices +    else: +        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + +    # Skip Adam and Muon setup for SGD mode (9) +    if current_optimizer_mode != 9: +        # Adam optimizer setup +        adam_param_groups_config = [ +            #dict(params=head_params, lr=0.22), +            #dict(params=embed_params, lr=0.6), +            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam +            dict(params=head_params, lr=exp_args.adam_lr ), +            dict(params=embed_params, lr=exp_args.adam_lr ), +            dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam +        ] +        # Add matrices specifically assigned to Adam for this experiment mode +        if adam_matrix_target_list: +            # Ensure adam_matrix_target_list is flat and contains Parameters +            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] +            if flat_adam_matrices: # Only add group if there are params +                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + +        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) +        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] +        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam +        optimizers = [optimizer1] # Start with Adam + +        # Muon optimizer setup +        if muon_params_target_list: +            # Ensure muon_params_target_list is flat, unique, and contains Parameters +            flat_unique_muon_params = [] +            seen_muon_ids = set() +            for sublist_or_p in muon_params_target_list: +                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): +                    if p is not None and id(p) not in seen_muon_ids: +                        flat_unique_muon_params.append(p) +                        seen_muon_ids.add(id(p)) + +            if flat_unique_muon_params: # Only create Muon if it has parameters +                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps +                optimizers.append(optimizer2) +            else: +                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) +                optimizer2 = None # Explicitly set to None if not created +        else: +            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) +            optimizer2 = None # Explicitly set to None + +    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_qk_group +        adam_matrix_target_list = attn_vo_group + all_mlp_matrices +    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP +        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_vo_group +        adam_matrix_target_list = attn_qk_group + all_mlp_matrices +    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP +        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = all_attn_matrices +        adam_matrix_target_list = all_mlp_matrices +    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) +        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = all_mlp_matrices +        adam_matrix_target_list = all_attn_matrices +    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam +        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = [] +        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam +    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP +        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = mlp_w2_group +        adam_matrix_target_list = all_attn_matrices + mlp_w1_group +    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn +        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_vo_group + all_mlp_matrices +        adam_matrix_target_list = attn_qk_group +    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP +        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_vo_group + mlp_w2_group +        adam_matrix_target_list = attn_qk_group + mlp_w1_group +    elif current_optimizer_mode == 9: # sgd + momentum +        # This mode uses SGD with momentum for all parameters, no Muon or Adam +        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) +        all_params = list(model.parameters()) +        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument +        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) +        optimizer2 = None +        optimizers = [optimizer1] +    elif current_optimizer_mode == 10: # Muon on O Attn, MLP +        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_o_params + all_mlp_matrices +        adam_matrix_target_list = attn_v_params + attn_qk_group +    elif current_optimizer_mode == 13: +        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_o_params + mlp_w2_group +        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
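+    # Sanity-check sketch (kept as a comment; assumes muon_params_target_list / adam_matrix_target_list as built by the chosen mode): +    #   muon_ids = {id(p) for p in muon_params_target_list} +    #   adam_ids = {id(p) for p in adam_matrix_target_list} +    #   assert muon_ids.isdisjoint(adam_ids), "a parameter was routed to both Muon and Adam"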
+    elif current_optimizer_mode == 14: +        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_o_params +        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices +    elif current_optimizer_mode == 15: +        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_v_params +        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices +    elif current_optimizer_mode == 16: +        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) +        muon_params_target_list = attn_v_params + attn_qk_group +        adam_matrix_target_list = attn_o_params + all_mlp_matrices +    else: +        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + +    # Skip Adam and Muon setup for SGD mode (9) +    if current_optimizer_mode != 9: +        # Adam optimizer setup +        adam_param_groups_config = [ +            #dict(params=head_params, lr=0.22), +            #dict(params=embed_params, lr=0.6), +            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam +            dict(params=head_params, lr=exp_args.adam_lr ), +            dict(params=embed_params, lr=exp_args.adam_lr ), +            dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam +        ] +        # Add matrices specifically assigned to Adam for this experiment mode +        if adam_matrix_target_list: +            # Ensure adam_matrix_target_list is flat and contains Parameters +            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] +            if flat_adam_matrices: # Only add group if there are params +                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + +        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) +        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] +        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam +        optimizers = [optimizer1] # Start with Adam + +        # Muon optimizer setup +        if muon_params_target_list: +            # Ensure muon_params_target_list is flat, unique, and contains Parameters +            flat_unique_muon_params = [] +            seen_muon_ids = set() +            for sublist_or_p in muon_params_target_list: +                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): +                    if p is not None and id(p) not in seen_muon_ids: +                        flat_unique_muon_params.append(p) +                        seen_muon_ids.add(id(p)) + +            if flat_unique_muon_params: # Only create Muon if it has parameters +                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; muon_lr is only defined in the qkvo branch, so read it from exp_args here +                optimizers.append(optimizer2) +            else: +                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) +                optimizer2 = None # Explicitly set to None if not created +        else: +            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) +            optimizer2 = None # Explicitly set to None + +    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
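+            # A minimal reload sketch (assumes the same model and optimizer construction as in this script): +            #   ckpt = torch.load(str(checkpoint_path)) +            #   model_compiled.load_state_dict(ckpt["model"]) +            #   for opt, opt_state in zip(optimizers, ckpt["optimizers"]): +            #       opt.load_state_dict(opt_state)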
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 17:26:06] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
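+# ----------------------------------------------------------------------------
+# The Muon momentum warmup used in the training loop above, worked through:
+# momentum ramps linearly from 0.85 to 0.95 over the first 300 steps, then
+# stays at 0.95. A self-contained sketch of that schedule:
+def muon_momentum_at(step: int) -> float:
+    frac = min(step / 300, 1)
+    return (1 - frac) * 0.85 + frac * 0.95
+
+# muon_momentum_at(0) -> 0.85, muon_momentum_at(150) -> 0.90, muon_momentum_at(300) -> 0.95
+# ----------------------------------------------------------------------------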
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
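+# ----------------------------------------------------------------------------
+# A sketch of the .bin shard layout _load_data_shard above expects: a header
+# of 256 int32s (magic 20240520, version 1, token count), followed by the
+# uint16 tokens. The writer below is illustrative, not part of the script:
+import numpy as np
+
+def write_data_shard(path, tokens_u16):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520            # magic number checked by the loader
+    header[1] = 1                   # version checked by the loader
+    header[2] = len(tokens_u16)     # number of uint16 tokens that follow
+    with open(path, "wb") as f:
+        f.write(header.tobytes())   # 256 * 4 bytes, matching f.seek(256 * 4)
+        f.write(np.asarray(tokens_u16, dtype=np.uint16).tobytes())
+
+# write_data_shard("train_000.bin", [1, 2, 3])  # hypothetical usage
+# ----------------------------------------------------------------------------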
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
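+# ----------------------------------------------------------------------------
+# The first-token-accuracy (FTA) check above in isolation: a sample counts as
+# correct when the argmax logit at the last prompt position equals the first
+# token of " " + answer. Toy, self-contained sketch with made-up tensors:
+import torch
+
+def first_token_correct(logits: torch.Tensor, prompt_len: int, expected_token: int) -> bool:
+    # logits: (seq_len, vocab_size) for a single padded sequence
+    predicted = torch.argmax(logits[prompt_len - 1]).item()
+    return predicted == expected_token
+
+# toy check: vocab of 5, prompt of 3 tokens, expected first answer token id 4
+# _logits = torch.zeros(8, 5); _logits[2, 4] = 1.0
+# first_token_correct(_logits, 3, 4)  # -> True
+# ----------------------------------------------------------------------------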
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
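+# ----------------------------------------------------------------------------
+# Why the weighted and unweighted totals above can differ: suppose group 0 has
+# 100 eval samples with 90 correct (0.90) and group 5 has 10 samples with 2
+# correct (0.20). Weighted: (90 + 2) / (100 + 10) ~= 0.84; unweighted:
+# (0.90 + 0.20) / 2 = 0.55. The per-group-capped fixed eval set narrows the
+# gap, but only the unweighted number treats rare groups on equal footing.
+# ----------------------------------------------------------------------------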
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr ),
+            dict(params=embed_params, lr=exp_args.adam_lr ),
+            dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
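+# ----------------------------------------------------------------------------
+# A disjointness check one could run after the mode dispatch above: every
+# parameter should be owned by exactly one optimizer. Hypothetical helper
+# using the script's own variables:
+#
+#   muon_ids = {id(p) for p in flat_unique_muon_params} if optimizer2 else set()
+#   adam_ids = {id(p) for g in optimizer1.param_groups for p in g["params"]}
+#   assert not (muon_ids & adam_ids), "a parameter is in both Muon and Adam"
+#   n_owned = len(muon_ids | adam_ids)
+#   n_total = sum(1 for _ in model.parameters())
+#   assert n_owned == n_total, f"{n_total - n_owned} parameters unassigned"
+# ----------------------------------------------------------------------------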
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr ),
+            dict(params=embed_params, lr=exp_args.adam_lr ),
+            dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; this branch defines no local muon_lr
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
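+# ----------------------------------------------------------------------------
+# The stable-then-decay schedule in get_lr, worked through for this run's
+# settings (num_iterations=10000, cooldown_frac=0.8): the multiplier is 1.0 up
+# to step 2000 (x < 0.2), then decays linearly to 0.1 at the final step, e.g.
+# step 6000 -> x=0.6, w=(1-0.6)/0.8=0.5, multiplier = 0.5*1.0 + 0.5*0.1 = 0.55.
+# Self-contained replica for illustration:
+def lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)
+    if x < 1 - cooldown_frac:
+        return 1.0
+    w = (1 - x) / cooldown_frac
+    return w * 1.0 + (1 - w) * 0.1
+
+# lr_multiplier(0) -> 1.0, lr_multiplier(6000) -> 0.55, lr_multiplier(10000) -> 0.1
+# ----------------------------------------------------------------------------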
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
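+# ----------------------------------------------------------------------------
+# Shape of the fixed_eval_indices.json written above: one key per group id (as
+# a string), each mapping to at most per_group_k line numbers into the QA
+# jsonl (values below are illustrative):
+#
+#   {"0": [1390189, 1220977, ...], "1": [1238956, 182074, ...], ...}
+#
+# Reusing this file across eval steps is what keeps the detailed eval set
+# fixed, so the per-group loss/FTA curves stay comparable over training.
+# ----------------------------------------------------------------------------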
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
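+# ----------------------------------------------------------------------------
+# The validation step count above for this run's settings, assuming a single
+# GPU (world_size=1): val_batch_size = 1 * 16384 and val_num_steps =
+# 491520 // 16384 = 30 with no remainder, so the divisibility warning above
+# stays silent.
+#
+#   val_tokens, val_seq_len, world_size = 491520, 16384, 1
+#   val_batch_size = world_size * val_seq_len     # 16384
+#   val_num_steps = val_tokens // val_batch_size  # 30
+# ----------------------------------------------------------------------------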
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use the unweighted (simple) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
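+    # Worked example: frac = min(step / 300, 1) ramps the momentum linearly from
+    # 0.85 at step 0 to 0.95 at step 300 and holds it there; e.g. at step 150,
+    # momentum = 0.5 * 0.85 + 0.5 * 0.95 = 0.90. (In this mode-9 pure-SGD run a
+    # single optimizer is configured, so optimizer2 is presumably None and the
+    # ramp is skipped.)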
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Per-token loss is computed here but not included in the printed line below
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
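For reference before the captured run log: the per-group "FTA" figures below are produced by run_detailed_evaluation, which is defined earlier in the script and not shown here. A plausible minimal sketch of a first-token-accuracy check, consistent only with the "question? Answer: x" format enforced by _is_valid_qa_text_for_fta above (model_generate is a hypothetical callable returning the model's argmax next-token id for a prompt):

import re

def first_token_accuracy(model_generate, tokenizer, samples):
    # samples: iterable of raw QA strings of the form "<question>? Answer: <answer>"
    hits, total = 0, 0
    for text in samples:
        m = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE)
        if m is None:
            continue  # unparseable samples are filtered out when the fixed set is built
        prompt = f"{m.group(1)} Answer:"
        # GPT-2 BPE: the gold answer's first token usually carries a leading space
        gold_first = tokenizer.encode(" " + m.group(2).strip())[0]
        hits += int(model_generate(prompt) == gold_first)
        total += 1
    return hits / max(total, 1)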
+[2025-09-05 17:26:06] [Rank 0] PRINT: Constructing model...
+[2025-09-05 17:26:07] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 17:26:07] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 17:26:07] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 17:26:11] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 17:26:11] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 17:26:12] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 17:26:12] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 17:26:12] [Rank 0] PRINT: Model returns:
+[2025-09-05 17:26:12] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 17:26:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 17:26:12] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.2).
+[2025-09-05 17:26:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 17:26:12] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 17:26:16] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 17:26:16] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 17:27:03] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 17:27:04] [Rank 0] PRINT: Starting training...
+[2025-09-05 17:27:10] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/fixed_eval_indices.json
+[2025-09-05 17:27:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:27:14] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 17:27:47] [Rank 0] step:21/10000 train_time:33288ms step_avg:1585.13ms
+[2025-09-05 17:27:48] [Rank 0] step:41/10000 train_time:34016ms step_avg:829.65ms
+[2025-09-05 17:27:49] [Rank 0] step:61/10000 train_time:34743ms step_avg:569.56ms
+[2025-09-05 17:27:49] [Rank 0] step:81/10000 train_time:35590ms step_avg:439.39ms
+[2025-09-05 17:27:50] [Rank 0] step:101/10000 train_time:36316ms step_avg:359.57ms
+[2025-09-05 17:27:51] [Rank 0] step:121/10000 train_time:37043ms step_avg:306.14ms
+[2025-09-05 17:27:52] [Rank 0] step:141/10000 train_time:37911ms step_avg:268.88ms
+[2025-09-05 17:27:52] [Rank 0] step:161/10000 train_time:38638ms step_avg:239.99ms
+[2025-09-05 17:27:53] [Rank 0] step:181/10000 train_time:39365ms step_avg:217.49ms
+[2025-09-05 17:27:54] [Rank 0] step:201/10000 train_time:40092ms step_avg:199.46ms
+[2025-09-05 17:27:55] [Rank 0] step:221/10000 train_time:40818ms step_avg:184.70ms
+[2025-09-05 17:27:55] [Rank 0] step:241/10000 train_time:41545ms step_avg:172.39ms
+[2025-09-05 17:27:56] [Rank 0] step:261/10000 train_time:42273ms step_avg:161.97ms
+[2025-09-05 17:27:57] [Rank 0] step:281/10000 train_time:42999ms step_avg:153.02ms
+[2025-09-05 17:27:57] [Rank 0] step:301/10000 train_time:43726ms step_avg:145.27ms
+[2025-09-05 17:27:58] [Rank 0] step:321/10000 train_time:44453ms step_avg:138.48ms
+[2025-09-05 17:27:59] [Rank 0] step:341/10000 train_time:45180ms step_avg:132.49ms
+[2025-09-05 17:28:00] [Rank 0] step:361/10000 train_time:45906ms step_avg:127.16ms
+[2025-09-05 17:28:00] [Rank 0] step:381/10000 train_time:46632ms step_avg:122.39ms
+[2025-09-05 17:28:01] [Rank 0] step:401/10000 train_time:47359ms step_avg:118.10ms
+[2025-09-05 17:28:02] [Rank 0] step:421/10000 train_time:48086ms step_avg:114.22ms
+[2025-09-05 17:28:03] [Rank 0] step:441/10000 train_time:48813ms step_avg:110.69ms
+[2025-09-05 17:28:03] [Rank 0] step:461/10000 train_time:49541ms step_avg:107.46ms
+[2025-09-05 17:28:04] [Rank 0] step:481/10000 train_time:50267ms step_avg:104.51ms
+[2025-09-05 17:28:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:28:05] [Rank 0] PRINT: step:500/10000 train_loss:4.6792 val_loss:3.2652 train_time:51075ms step_avg:102.15ms
+[2025-09-05 17:28:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:28:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:29:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:29:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:29:27] [Rank 0] Total Loss: 5.4359
+[2025-09-05 17:29:27] [Rank 0] Total FTA (Unweighted): 0.1269
+[2025-09-05 17:29:27] [Rank 0] Total FTA (Weighted): 0.1269
+[2025-09-05 17:29:27] [Rank 0] Group 0 Loss: 3.3404
+[2025-09-05 17:29:27] [Rank 0] Group 1 Loss: 3.2597
+[2025-09-05 17:29:27] [Rank 0] Group 2 Loss: 3.6126
+[2025-09-05 17:29:27] [Rank 0] Group 3 Loss: 4.3442
+[2025-09-05 17:29:27] [Rank 0] Group 4 Loss: 5.2473
+[2025-09-05 17:29:27] [Rank 0] Group 5 Loss: 5.6078
+[2025-09-05 17:29:27] [Rank 0] Group 6 Loss: 5.8981
+[2025-09-05 17:29:27] [Rank 0] Group 7 Loss: 5.9043
+[2025-09-05 17:29:27] [Rank 0] Group 8 Loss: 6.1078
+[2025-09-05 17:29:27] [Rank 0] Group 9 Loss: 6.2899
+[2025-09-05 17:29:27] [Rank 0] Group 10 Loss: 6.2634
+[2025-09-05 17:29:27] [Rank 0] Group 11 Loss: 6.3520
+[2025-09-05 17:29:27] [Rank 0] Group 12 Loss: 6.1644
+[2025-09-05 17:29:27] [Rank 0] Group 13 Loss: 6.1541
+[2025-09-05 17:29:27] [Rank 0] Group 14 Loss: 6.2590
+[2025-09-05 17:29:27] [Rank 0] Group 15 Loss: 6.1698
+[2025-09-05 17:29:27] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 17:29:27] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 17:29:27] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 17:29:27] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 17:29:27] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 17:29:27] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-05 17:29:27] [Rank 0] Group 6 FTA: 0.0700
+[2025-09-05 17:29:27] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 17:29:27] [Rank 0] Group 8 FTA: 0.1300
+[2025-09-05 17:29:27] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-05 17:29:27] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 17:29:27] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 17:29:27] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 17:29:27] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 17:29:27] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 17:29:27] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 17:29:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:29:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:29:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:29:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:29:29] [Rank 0] step:501/10000 train_time:51085ms step_avg:101.97ms
+[2025-09-05 17:29:30] [Rank 0] step:521/10000 train_time:51748ms step_avg:99.32ms
+[2025-09-05 17:29:30] [Rank 0] step:541/10000 train_time:52474ms step_avg:96.99ms
+[2025-09-05 17:29:31] [Rank 0] step:561/10000 train_time:53200ms step_avg:94.83ms
+[2025-09-05 17:29:32] [Rank 0] step:581/10000 train_time:53927ms step_avg:92.82ms
+[2025-09-05 17:29:33] [Rank 0] step:601/10000 train_time:54654ms step_avg:90.94ms
+[2025-09-05 17:29:33] [Rank 0] step:621/10000 train_time:55381ms step_avg:89.18ms
+[2025-09-05 17:29:34] [Rank 0] step:641/10000 train_time:56108ms step_avg:87.53ms
+[2025-09-05 17:29:35] [Rank 0] step:661/10000 train_time:56835ms step_avg:85.98ms
+[2025-09-05 17:29:36] [Rank 0] step:681/10000 train_time:57561ms step_avg:84.52ms
+[2025-09-05 17:29:36] [Rank 0] step:701/10000 train_time:58293ms step_avg:83.16ms
+[2025-09-05 17:29:37] [Rank 0] step:721/10000 train_time:59020ms step_avg:81.86ms
+[2025-09-05 17:29:38] [Rank 0] step:741/10000 train_time:59746ms step_avg:80.63ms
+[2025-09-05 17:29:38] [Rank 0] step:761/10000 train_time:60477ms step_avg:79.47ms
+[2025-09-05 17:29:39] [Rank 0] step:781/10000 train_time:61210ms step_avg:78.37ms
+[2025-09-05 17:29:40] [Rank 0] step:801/10000 train_time:61942ms step_avg:77.33ms
+[2025-09-05 17:29:41] [Rank 0] step:821/10000 train_time:63286ms step_avg:77.08ms
+[2025-09-05 17:29:42] [Rank 0] step:841/10000 train_time:64017ms step_avg:76.12ms
+[2025-09-05 17:29:43] [Rank 0] step:861/10000 train_time:64749ms step_avg:75.20ms
+[2025-09-05 17:29:43] [Rank 0] step:881/10000 train_time:65480ms step_avg:74.32ms
+[2025-09-05 17:29:44] [Rank 0] step:901/10000 train_time:66212ms step_avg:73.49ms
+[2025-09-05 17:29:45] [Rank 0] step:921/10000 train_time:66944ms step_avg:72.69ms
+[2025-09-05 17:29:46] [Rank 0] step:941/10000 train_time:67676ms step_avg:71.92ms
+[2025-09-05 17:29:46] [Rank 0] step:961/10000 train_time:68408ms step_avg:71.18ms
+[2025-09-05 17:29:47] [Rank 0] step:981/10000 train_time:69140ms step_avg:70.48ms
+[2025-09-05 17:29:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:29:48] [Rank 0] PRINT: step:1000/10000 train_loss:2.9017 val_loss:2.6164 train_time:69952ms step_avg:69.95ms
+[2025-09-05 17:29:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:29:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:31:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:31:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:31:10] [Rank 0] Total Loss: 5.0743
+[2025-09-05 17:31:10] [Rank 0] Total FTA (Unweighted): 0.1994
+[2025-09-05 17:31:10] [Rank 0] Total FTA (Weighted): 0.1994
+[2025-09-05 17:31:10] [Rank 0] Group 0 Loss: 3.2788
+[2025-09-05 17:31:10] [Rank 0] Group 1 Loss: 3.2649
+[2025-09-05 17:31:10] [Rank 0] Group 2 Loss: 3.4291
+[2025-09-05 17:31:10] [Rank 0] Group 3 Loss: 3.8925
+[2025-09-05 17:31:10] [Rank 0] Group 4 Loss: 4.4879
+[2025-09-05 17:31:10] [Rank 0] Group 5 Loss: 4.9986
+[2025-09-05 17:31:10] [Rank 0] Group 6 Loss: 5.3242
+[2025-09-05 17:31:10] [Rank 0] Group 7 Loss: 5.4681
+[2025-09-05 17:31:10] [Rank 0] Group 8 Loss: 5.7407
+[2025-09-05 17:31:10] [Rank 0] Group 9 Loss: 5.8894
+[2025-09-05 17:31:10] [Rank 0] Group 10 Loss: 5.9167
+[2025-09-05 17:31:10] [Rank 0] Group 11 Loss: 5.9944
+[2025-09-05 17:31:10] [Rank 0] Group 12 Loss: 5.8556
+[2025-09-05 17:31:10] [Rank 0] Group 13 Loss: 5.8577
+[2025-09-05 17:31:10] [Rank 0] Group 14 Loss: 5.9113
+[2025-09-05 17:31:10] [Rank 0] Group 15 Loss: 5.8789 +[2025-09-05 17:31:10] [Rank 0] Group 15 Loss: 5.8789 +[2025-09-05 17:31:10] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:31:10] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:31:10] [Rank 0] Group 1 FTA: 0.5200 +[2025-09-05 17:31:10] [Rank 0] Group 1 FTA: 0.5200 +[2025-09-05 17:31:10] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 17:31:10] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 17:31:10] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 17:31:10] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 17:31:10] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-05 17:31:10] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-05 17:31:10] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 17:31:10] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 17:31:10] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-05 17:31:10] [Rank 0] Group 6 FTA: 0.0900 +[2025-09-05 17:31:10] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 17:31:10] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 17:31:10] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-05 17:31:10] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-05 17:31:10] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-05 17:31:10] [Rank 0] Group 9 FTA: 0.1200 +[2025-09-05 17:31:10] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-05 17:31:10] [Rank 0] Group 10 FTA: 0.1100 +[2025-09-05 17:31:10] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 17:31:10] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 17:31:10] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 17:31:10] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 17:31:10] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 17:31:10] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 17:31:10] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 17:31:10] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 17:31:10] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 17:31:10] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 17:31:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png +[2025-09-05 17:31:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png +[2025-09-05 17:31:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png +[2025-09-05 17:31:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png +[2025-09-05 17:31:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png +[2025-09-05 17:31:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png +[2025-09-05 17:31:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png +[2025-09-05 17:31:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png +[2025-09-05 17:31:12] [Rank 0] step:1001/10000 train_time:69962ms step_avg:69.89ms +[2025-09-05 17:31:12] [Rank 0] step:1001/10000 train_time:69962ms step_avg:69.89ms +[2025-09-05 17:31:13] [Rank 0] step:1021/10000 train_time:70635ms step_avg:69.18ms +[2025-09-05 17:31:13] [Rank 0] step:1021/10000 train_time:70635ms step_avg:69.18ms +[2025-09-05 17:31:13] [Rank 0] step:1041/10000 train_time:71367ms 
step_avg:68.56ms +[2025-09-05 17:31:13] [Rank 0] step:1041/10000 train_time:71367ms step_avg:68.56ms +[2025-09-05 17:31:14] [Rank 0] step:1061/10000 train_time:72099ms step_avg:67.95ms +[2025-09-05 17:31:14] [Rank 0] step:1061/10000 train_time:72099ms step_avg:67.95ms +[2025-09-05 17:31:15] [Rank 0] step:1081/10000 train_time:72831ms step_avg:67.37ms +[2025-09-05 17:31:15] [Rank 0] step:1081/10000 train_time:72831ms step_avg:67.37ms +[2025-09-05 17:31:16] [Rank 0] step:1101/10000 train_time:73564ms step_avg:66.82ms +[2025-09-05 17:31:16] [Rank 0] step:1101/10000 train_time:73564ms step_avg:66.82ms +[2025-09-05 17:31:16] [Rank 0] step:1121/10000 train_time:74295ms step_avg:66.28ms +[2025-09-05 17:31:16] [Rank 0] step:1121/10000 train_time:74295ms step_avg:66.28ms +[2025-09-05 17:31:17] [Rank 0] step:1141/10000 train_time:75028ms step_avg:65.76ms +[2025-09-05 17:31:17] [Rank 0] step:1141/10000 train_time:75028ms step_avg:65.76ms +[2025-09-05 17:31:18] [Rank 0] step:1161/10000 train_time:75762ms step_avg:65.26ms +[2025-09-05 17:31:18] [Rank 0] step:1161/10000 train_time:75762ms step_avg:65.26ms +[2025-09-05 17:31:19] [Rank 0] step:1181/10000 train_time:76494ms step_avg:64.77ms +[2025-09-05 17:31:19] [Rank 0] step:1181/10000 train_time:76494ms step_avg:64.77ms +[2025-09-05 17:31:19] [Rank 0] step:1201/10000 train_time:77226ms step_avg:64.30ms +[2025-09-05 17:31:19] [Rank 0] step:1201/10000 train_time:77226ms step_avg:64.30ms +[2025-09-05 17:31:20] [Rank 0] step:1221/10000 train_time:77958ms step_avg:63.85ms +[2025-09-05 17:31:20] [Rank 0] step:1221/10000 train_time:77958ms step_avg:63.85ms +[2025-09-05 17:31:21] [Rank 0] step:1241/10000 train_time:78689ms step_avg:63.41ms +[2025-09-05 17:31:21] [Rank 0] step:1241/10000 train_time:78689ms step_avg:63.41ms +[2025-09-05 17:31:22] [Rank 0] step:1261/10000 train_time:79421ms step_avg:62.98ms +[2025-09-05 17:31:22] [Rank 0] step:1261/10000 train_time:79421ms step_avg:62.98ms +[2025-09-05 17:31:22] [Rank 0] step:1281/10000 train_time:80153ms step_avg:62.57ms +[2025-09-05 17:31:22] [Rank 0] step:1281/10000 train_time:80153ms step_avg:62.57ms +[2025-09-05 17:31:23] [Rank 0] step:1301/10000 train_time:80885ms step_avg:62.17ms +[2025-09-05 17:31:23] [Rank 0] step:1301/10000 train_time:80885ms step_avg:62.17ms +[2025-09-05 17:31:24] [Rank 0] step:1321/10000 train_time:81617ms step_avg:61.78ms +[2025-09-05 17:31:24] [Rank 0] step:1321/10000 train_time:81617ms step_avg:61.78ms +[2025-09-05 17:31:24] [Rank 0] step:1341/10000 train_time:82349ms step_avg:61.41ms +[2025-09-05 17:31:24] [Rank 0] step:1341/10000 train_time:82349ms step_avg:61.41ms +[2025-09-05 17:31:25] [Rank 0] step:1361/10000 train_time:83080ms step_avg:61.04ms +[2025-09-05 17:31:25] [Rank 0] step:1361/10000 train_time:83080ms step_avg:61.04ms +[2025-09-05 17:31:26] [Rank 0] step:1381/10000 train_time:83813ms step_avg:60.69ms +[2025-09-05 17:31:26] [Rank 0] step:1381/10000 train_time:83813ms step_avg:60.69ms +[2025-09-05 17:31:27] [Rank 0] step:1401/10000 train_time:84545ms step_avg:60.35ms +[2025-09-05 17:31:27] [Rank 0] step:1401/10000 train_time:84545ms step_avg:60.35ms +[2025-09-05 17:31:27] [Rank 0] step:1421/10000 train_time:85278ms step_avg:60.01ms +[2025-09-05 17:31:27] [Rank 0] step:1421/10000 train_time:85278ms step_avg:60.01ms +[2025-09-05 17:31:28] [Rank 0] step:1441/10000 train_time:86009ms step_avg:59.69ms +[2025-09-05 17:31:28] [Rank 0] step:1441/10000 train_time:86009ms step_avg:59.69ms +[2025-09-05 17:31:29] [Rank 0] step:1461/10000 train_time:86741ms step_avg:59.37ms 
+[2025-09-05 17:31:29] [Rank 0] step:1461/10000 train_time:86741ms step_avg:59.37ms +[2025-09-05 17:31:30] [Rank 0] step:1481/10000 train_time:87476ms step_avg:59.07ms +[2025-09-05 17:31:30] [Rank 0] step:1481/10000 train_time:87476ms step_avg:59.07ms +[2025-09-05 17:31:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:31:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:31:31] [Rank 0] PRINT: step:1500/10000 train_loss:2.4559 val_loss:2.3100 train_time:88288ms step_avg:58.86ms +[2025-09-05 17:31:31] [Rank 0] PRINT: step:1500/10000 train_loss:2.4559 val_loss:2.3100 train_time:88288ms step_avg:58.86ms +[2025-09-05 17:31:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:31:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:31:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:31:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:32:53] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:32:53] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:32:53] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:32:53] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:32:53] [Rank 0] Total Loss: 4.8771 +[2025-09-05 17:32:53] [Rank 0] Total Loss: 4.8771 +[2025-09-05 17:32:53] [Rank 0] Total FTA (Unweighted): 0.2594 +[2025-09-05 17:32:53] [Rank 0] Total FTA (Unweighted): 0.2594 +[2025-09-05 17:32:53] [Rank 0] Total FTA (Weighted): 0.2594 +[2025-09-05 17:32:53] [Rank 0] Total FTA (Weighted): 0.2594 +[2025-09-05 17:32:53] [Rank 0] Group 0 Loss: 3.4655 +[2025-09-05 17:32:53] [Rank 0] Group 0 Loss: 3.4655 +[2025-09-05 17:32:53] [Rank 0] Group 1 Loss: 3.2653 +[2025-09-05 17:32:53] [Rank 0] Group 1 Loss: 3.2653 +[2025-09-05 17:32:53] [Rank 0] Group 2 Loss: 3.3732 +[2025-09-05 17:32:53] [Rank 0] Group 2 Loss: 3.3732 +[2025-09-05 17:32:53] [Rank 0] Group 3 Loss: 3.7948 +[2025-09-05 17:32:53] [Rank 0] Group 3 Loss: 3.7948 +[2025-09-05 17:32:53] [Rank 0] Group 4 Loss: 4.1911 +[2025-09-05 17:32:53] [Rank 0] Group 4 Loss: 4.1911 +[2025-09-05 17:32:53] [Rank 0] Group 5 Loss: 4.6872 +[2025-09-05 17:32:53] [Rank 0] Group 5 Loss: 4.6872 +[2025-09-05 17:32:53] [Rank 0] Group 6 Loss: 5.0183 +[2025-09-05 17:32:53] [Rank 0] Group 6 Loss: 5.0183 +[2025-09-05 17:32:53] [Rank 0] Group 7 Loss: 5.1698 +[2025-09-05 17:32:53] [Rank 0] Group 7 Loss: 5.1698 +[2025-09-05 17:32:53] [Rank 0] Group 8 Loss: 5.4517 +[2025-09-05 17:32:53] [Rank 0] Group 8 Loss: 5.4517 +[2025-09-05 17:32:53] [Rank 0] Group 9 Loss: 5.5973 +[2025-09-05 17:32:53] [Rank 0] Group 9 Loss: 5.5973 +[2025-09-05 17:32:53] [Rank 0] Group 10 Loss: 5.6458 +[2025-09-05 17:32:53] [Rank 0] Group 10 Loss: 5.6458 +[2025-09-05 17:32:53] [Rank 0] Group 11 Loss: 5.7185 +[2025-09-05 17:32:53] [Rank 0] Group 11 Loss: 5.7185 +[2025-09-05 17:32:53] [Rank 0] Group 12 Loss: 5.6128 +[2025-09-05 17:32:53] [Rank 0] Group 12 Loss: 5.6128 +[2025-09-05 17:32:53] [Rank 0] Group 13 Loss: 5.6363 +[2025-09-05 17:32:53] [Rank 0] Group 13 Loss: 5.6363 +[2025-09-05 17:32:53] [Rank 0] Group 14 Loss: 5.7098 +[2025-09-05 17:32:53] [Rank 0] Group 14 Loss: 5.7098 +[2025-09-05 17:32:53] [Rank 0] Group 15 Loss: 5.6969 +[2025-09-05 17:32:53] [Rank 0] Group 15 Loss: 5.6969 +[2025-09-05 17:32:53] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:32:53] 
[Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:32:53] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:32:53] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:32:53] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 17:32:53] [Rank 0] Group 2 FTA: 0.3100 +[2025-09-05 17:32:53] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 17:32:53] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 17:32:53] [Rank 0] Group 4 FTA: 0.1700 +[2025-09-05 17:32:53] [Rank 0] Group 4 FTA: 0.1700 +[2025-09-05 17:32:53] [Rank 0] Group 5 FTA: 0.2100 +[2025-09-05 17:32:53] [Rank 0] Group 5 FTA: 0.2100 +[2025-09-05 17:32:53] [Rank 0] Group 6 FTA: 0.1700 +[2025-09-05 17:32:53] [Rank 0] Group 6 FTA: 0.1700 +[2025-09-05 17:32:53] [Rank 0] Group 7 FTA: 0.1100 +[2025-09-05 17:32:53] [Rank 0] Group 7 FTA: 0.1100 +[2025-09-05 17:32:53] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 17:32:53] [Rank 0] Group 8 FTA: 0.2100 +[2025-09-05 17:32:53] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 17:32:53] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 17:32:53] [Rank 0] Group 10 FTA: 0.1400 +[2025-09-05 17:32:53] [Rank 0] Group 10 FTA: 0.1400 +[2025-09-05 17:32:53] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 17:32:53] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 17:32:53] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 17:32:53] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 17:32:53] [Rank 0] Group 13 FTA: 0.1000 +[2025-09-05 17:32:53] [Rank 0] Group 13 FTA: 0.1000 +[2025-09-05 17:32:53] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 17:32:53] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 17:32:53] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:32:53] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:32:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png +[2025-09-05 17:32:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png +[2025-09-05 17:32:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png +[2025-09-05 17:32:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png +[2025-09-05 17:32:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png +[2025-09-05 17:32:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png +[2025-09-05 17:32:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png +[2025-09-05 17:32:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png +[2025-09-05 17:32:54] [Rank 0] step:1501/10000 train_time:88298ms step_avg:58.83ms +[2025-09-05 17:32:54] [Rank 0] step:1501/10000 train_time:88298ms step_avg:58.83ms +[2025-09-05 17:32:55] [Rank 0] step:1521/10000 train_time:88972ms step_avg:58.50ms +[2025-09-05 17:32:55] [Rank 0] step:1521/10000 train_time:88972ms step_avg:58.50ms +[2025-09-05 17:32:56] [Rank 0] step:1541/10000 train_time:89703ms step_avg:58.21ms +[2025-09-05 17:32:56] [Rank 0] step:1541/10000 train_time:89703ms step_avg:58.21ms +[2025-09-05 17:32:56] [Rank 0] step:1561/10000 train_time:90436ms 
step_avg:57.93ms +[2025-09-05 17:32:56] [Rank 0] step:1561/10000 train_time:90436ms step_avg:57.93ms +[2025-09-05 17:32:57] [Rank 0] step:1581/10000 train_time:91167ms step_avg:57.66ms +[2025-09-05 17:32:57] [Rank 0] step:1581/10000 train_time:91167ms step_avg:57.66ms +[2025-09-05 17:32:58] [Rank 0] step:1601/10000 train_time:91898ms step_avg:57.40ms +[2025-09-05 17:32:58] [Rank 0] step:1601/10000 train_time:91898ms step_avg:57.40ms +[2025-09-05 17:32:58] [Rank 0] step:1621/10000 train_time:92630ms step_avg:57.14ms +[2025-09-05 17:32:58] [Rank 0] step:1621/10000 train_time:92630ms step_avg:57.14ms +[2025-09-05 17:32:59] [Rank 0] step:1641/10000 train_time:93565ms step_avg:57.02ms +[2025-09-05 17:32:59] [Rank 0] step:1641/10000 train_time:93565ms step_avg:57.02ms +[2025-09-05 17:33:00] [Rank 0] step:1661/10000 train_time:94297ms step_avg:56.77ms +[2025-09-05 17:33:00] [Rank 0] step:1661/10000 train_time:94297ms step_avg:56.77ms +[2025-09-05 17:33:01] [Rank 0] step:1681/10000 train_time:95028ms step_avg:56.53ms +[2025-09-05 17:33:01] [Rank 0] step:1681/10000 train_time:95028ms step_avg:56.53ms +[2025-09-05 17:33:02] [Rank 0] step:1701/10000 train_time:95759ms step_avg:56.30ms +[2025-09-05 17:33:02] [Rank 0] step:1701/10000 train_time:95759ms step_avg:56.30ms +[2025-09-05 17:33:02] [Rank 0] step:1721/10000 train_time:96492ms step_avg:56.07ms +[2025-09-05 17:33:02] [Rank 0] step:1721/10000 train_time:96492ms step_avg:56.07ms +[2025-09-05 17:33:03] [Rank 0] step:1741/10000 train_time:97224ms step_avg:55.84ms +[2025-09-05 17:33:03] [Rank 0] step:1741/10000 train_time:97224ms step_avg:55.84ms +[2025-09-05 17:33:04] [Rank 0] step:1761/10000 train_time:97956ms step_avg:55.63ms +[2025-09-05 17:33:04] [Rank 0] step:1761/10000 train_time:97956ms step_avg:55.63ms +[2025-09-05 17:33:05] [Rank 0] step:1781/10000 train_time:98689ms step_avg:55.41ms +[2025-09-05 17:33:05] [Rank 0] step:1781/10000 train_time:98689ms step_avg:55.41ms +[2025-09-05 17:33:05] [Rank 0] step:1801/10000 train_time:99574ms step_avg:55.29ms +[2025-09-05 17:33:05] [Rank 0] step:1801/10000 train_time:99574ms step_avg:55.29ms +[2025-09-05 17:33:06] [Rank 0] step:1821/10000 train_time:100306ms step_avg:55.08ms +[2025-09-05 17:33:06] [Rank 0] step:1821/10000 train_time:100306ms step_avg:55.08ms +[2025-09-05 17:33:07] [Rank 0] step:1841/10000 train_time:101038ms step_avg:54.88ms +[2025-09-05 17:33:07] [Rank 0] step:1841/10000 train_time:101038ms step_avg:54.88ms +[2025-09-05 17:33:08] [Rank 0] step:1861/10000 train_time:101890ms step_avg:54.75ms +[2025-09-05 17:33:08] [Rank 0] step:1861/10000 train_time:101890ms step_avg:54.75ms +[2025-09-05 17:33:08] [Rank 0] step:1881/10000 train_time:102622ms step_avg:54.56ms +[2025-09-05 17:33:08] [Rank 0] step:1881/10000 train_time:102622ms step_avg:54.56ms +[2025-09-05 17:33:09] [Rank 0] step:1901/10000 train_time:103355ms step_avg:54.37ms +[2025-09-05 17:33:09] [Rank 0] step:1901/10000 train_time:103355ms step_avg:54.37ms +[2025-09-05 17:33:10] [Rank 0] step:1921/10000 train_time:104087ms step_avg:54.18ms +[2025-09-05 17:33:10] [Rank 0] step:1921/10000 train_time:104087ms step_avg:54.18ms +[2025-09-05 17:33:11] [Rank 0] step:1941/10000 train_time:104818ms step_avg:54.00ms +[2025-09-05 17:33:11] [Rank 0] step:1941/10000 train_time:104818ms step_avg:54.00ms +[2025-09-05 17:33:11] [Rank 0] step:1961/10000 train_time:105551ms step_avg:53.82ms +[2025-09-05 17:33:11] [Rank 0] step:1961/10000 train_time:105551ms step_avg:53.82ms +[2025-09-05 17:33:12] [Rank 0] step:1981/10000 train_time:106282ms 
step_avg:53.65ms +[2025-09-05 17:33:12] [Rank 0] step:1981/10000 train_time:106282ms step_avg:53.65ms +[2025-09-05 17:33:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:33:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:33:13] [Rank 0] PRINT: step:2000/10000 train_loss:2.2223 val_loss:2.1294 train_time:107094ms step_avg:53.55ms +[2025-09-05 17:33:13] [Rank 0] PRINT: step:2000/10000 train_loss:2.2223 val_loss:2.1294 train_time:107094ms step_avg:53.55ms +[2025-09-05 17:33:13] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:33:13] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:33:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:33:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:34:36] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:34:36] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:34:36] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:34:36] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:34:36] [Rank 0] Total Loss: 4.7540 +[2025-09-05 17:34:36] [Rank 0] Total Loss: 4.7540 +[2025-09-05 17:34:36] [Rank 0] Total FTA (Unweighted): 0.2906 +[2025-09-05 17:34:36] [Rank 0] Total FTA (Unweighted): 0.2906 +[2025-09-05 17:34:36] [Rank 0] Total FTA (Weighted): 0.2906 +[2025-09-05 17:34:36] [Rank 0] Total FTA (Weighted): 0.2906 +[2025-09-05 17:34:36] [Rank 0] Group 0 Loss: 3.3080 +[2025-09-05 17:34:36] [Rank 0] Group 0 Loss: 3.3080 +[2025-09-05 17:34:36] [Rank 0] Group 1 Loss: 3.3211 +[2025-09-05 17:34:36] [Rank 0] Group 1 Loss: 3.3211 +[2025-09-05 17:34:36] [Rank 0] Group 2 Loss: 3.4040 +[2025-09-05 17:34:36] [Rank 0] Group 2 Loss: 3.4040 +[2025-09-05 17:34:36] [Rank 0] Group 3 Loss: 3.7374 +[2025-09-05 17:34:36] [Rank 0] Group 3 Loss: 3.7374 +[2025-09-05 17:34:36] [Rank 0] Group 4 Loss: 4.0857 +[2025-09-05 17:34:36] [Rank 0] Group 4 Loss: 4.0857 +[2025-09-05 17:34:36] [Rank 0] Group 5 Loss: 4.4933 +[2025-09-05 17:34:36] [Rank 0] Group 5 Loss: 4.4933 +[2025-09-05 17:34:36] [Rank 0] Group 6 Loss: 4.7892 +[2025-09-05 17:34:36] [Rank 0] Group 6 Loss: 4.7892 +[2025-09-05 17:34:36] [Rank 0] Group 7 Loss: 4.9796 +[2025-09-05 17:34:36] [Rank 0] Group 7 Loss: 4.9796 +[2025-09-05 17:34:36] [Rank 0] Group 8 Loss: 5.3026 +[2025-09-05 17:34:36] [Rank 0] Group 8 Loss: 5.3026 +[2025-09-05 17:34:36] [Rank 0] Group 9 Loss: 5.4371 +[2025-09-05 17:34:36] [Rank 0] Group 9 Loss: 5.4371 +[2025-09-05 17:34:36] [Rank 0] Group 10 Loss: 5.5321 +[2025-09-05 17:34:36] [Rank 0] Group 10 Loss: 5.5321 +[2025-09-05 17:34:36] [Rank 0] Group 11 Loss: 5.5895 +[2025-09-05 17:34:36] [Rank 0] Group 11 Loss: 5.5895 +[2025-09-05 17:34:36] [Rank 0] Group 12 Loss: 5.4726 +[2025-09-05 17:34:36] [Rank 0] Group 12 Loss: 5.4726 +[2025-09-05 17:34:36] [Rank 0] Group 13 Loss: 5.4956 +[2025-09-05 17:34:36] [Rank 0] Group 13 Loss: 5.4956 +[2025-09-05 17:34:36] [Rank 0] Group 14 Loss: 5.5615 +[2025-09-05 17:34:36] [Rank 0] Group 14 Loss: 5.5615 +[2025-09-05 17:34:36] [Rank 0] Group 15 Loss: 5.5546 +[2025-09-05 17:34:36] [Rank 0] Group 15 Loss: 5.5546 +[2025-09-05 17:34:36] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:34:36] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:34:36] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:34:36] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 
17:34:36] [Rank 0] Group 2 FTA: 0.5400 +[2025-09-05 17:34:36] [Rank 0] Group 2 FTA: 0.5400 +[2025-09-05 17:34:36] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 17:34:36] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 17:34:36] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 17:34:36] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 17:34:36] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 17:34:36] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 17:34:36] [Rank 0] Group 6 FTA: 0.2800 +[2025-09-05 17:34:36] [Rank 0] Group 6 FTA: 0.2800 +[2025-09-05 17:34:36] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 17:34:36] [Rank 0] Group 7 FTA: 0.1300 +[2025-09-05 17:34:36] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 17:34:36] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 17:34:36] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 17:34:36] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 17:34:36] [Rank 0] Group 10 FTA: 0.1600 +[2025-09-05 17:34:36] [Rank 0] Group 10 FTA: 0.1600 +[2025-09-05 17:34:36] [Rank 0] Group 11 FTA: 0.1500 +[2025-09-05 17:34:36] [Rank 0] Group 11 FTA: 0.1500 +[2025-09-05 17:34:36] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 17:34:36] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 17:34:36] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 17:34:36] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 17:34:36] [Rank 0] Group 14 FTA: 0.0900 +[2025-09-05 17:34:36] [Rank 0] Group 14 FTA: 0.0900 +[2025-09-05 17:34:36] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:34:36] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:34:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png +[2025-09-05 17:34:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png +[2025-09-05 17:34:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png +[2025-09-05 17:34:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png +[2025-09-05 17:34:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png +[2025-09-05 17:34:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png +[2025-09-05 17:34:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png +[2025-09-05 17:34:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png +[2025-09-05 17:34:37] [Rank 0] step:2001/10000 train_time:107105ms step_avg:53.53ms +[2025-09-05 17:34:37] [Rank 0] step:2001/10000 train_time:107105ms step_avg:53.53ms +[2025-09-05 17:34:38] [Rank 0] step:2021/10000 train_time:107771ms step_avg:53.33ms +[2025-09-05 17:34:38] [Rank 0] step:2021/10000 train_time:107771ms step_avg:53.33ms +[2025-09-05 17:34:39] [Rank 0] step:2041/10000 train_time:108503ms step_avg:53.16ms +[2025-09-05 17:34:39] [Rank 0] step:2041/10000 train_time:108503ms step_avg:53.16ms +[2025-09-05 17:34:40] [Rank 0] step:2061/10000 train_time:109234ms step_avg:53.00ms +[2025-09-05 17:34:40] [Rank 0] step:2061/10000 train_time:109234ms step_avg:53.00ms +[2025-09-05 17:34:40] [Rank 0] step:2081/10000 
train_time:109966ms step_avg:52.84ms +[2025-09-05 17:34:40] [Rank 0] step:2081/10000 train_time:109966ms step_avg:52.84ms +[2025-09-05 17:34:41] [Rank 0] step:2101/10000 train_time:110698ms step_avg:52.69ms +[2025-09-05 17:34:41] [Rank 0] step:2101/10000 train_time:110698ms step_avg:52.69ms +[2025-09-05 17:34:42] [Rank 0] step:2121/10000 train_time:111430ms step_avg:52.54ms +[2025-09-05 17:34:42] [Rank 0] step:2121/10000 train_time:111430ms step_avg:52.54ms +[2025-09-05 17:34:42] [Rank 0] step:2141/10000 train_time:112162ms step_avg:52.39ms +[2025-09-05 17:34:42] [Rank 0] step:2141/10000 train_time:112162ms step_avg:52.39ms +[2025-09-05 17:34:43] [Rank 0] step:2161/10000 train_time:112893ms step_avg:52.24ms +[2025-09-05 17:34:43] [Rank 0] step:2161/10000 train_time:112893ms step_avg:52.24ms +[2025-09-05 17:34:44] [Rank 0] step:2181/10000 train_time:113626ms step_avg:52.10ms +[2025-09-05 17:34:44] [Rank 0] step:2181/10000 train_time:113626ms step_avg:52.10ms +[2025-09-05 17:34:45] [Rank 0] step:2201/10000 train_time:114356ms step_avg:51.96ms +[2025-09-05 17:34:45] [Rank 0] step:2201/10000 train_time:114356ms step_avg:51.96ms +[2025-09-05 17:34:45] [Rank 0] step:2221/10000 train_time:115088ms step_avg:51.82ms +[2025-09-05 17:34:45] [Rank 0] step:2221/10000 train_time:115088ms step_avg:51.82ms +[2025-09-05 17:34:46] [Rank 0] step:2241/10000 train_time:115824ms step_avg:51.68ms +[2025-09-05 17:34:46] [Rank 0] step:2241/10000 train_time:115824ms step_avg:51.68ms +[2025-09-05 17:34:47] [Rank 0] step:2261/10000 train_time:116562ms step_avg:51.55ms +[2025-09-05 17:34:47] [Rank 0] step:2261/10000 train_time:116562ms step_avg:51.55ms +[2025-09-05 17:34:48] [Rank 0] step:2281/10000 train_time:117300ms step_avg:51.42ms +[2025-09-05 17:34:48] [Rank 0] step:2281/10000 train_time:117300ms step_avg:51.42ms +[2025-09-05 17:34:48] [Rank 0] step:2301/10000 train_time:118037ms step_avg:51.30ms +[2025-09-05 17:34:48] [Rank 0] step:2301/10000 train_time:118037ms step_avg:51.30ms +[2025-09-05 17:34:49] [Rank 0] step:2321/10000 train_time:118776ms step_avg:51.17ms +[2025-09-05 17:34:49] [Rank 0] step:2321/10000 train_time:118776ms step_avg:51.17ms +[2025-09-05 17:34:50] [Rank 0] step:2341/10000 train_time:119514ms step_avg:51.05ms +[2025-09-05 17:34:50] [Rank 0] step:2341/10000 train_time:119514ms step_avg:51.05ms +[2025-09-05 17:34:51] [Rank 0] step:2361/10000 train_time:120253ms step_avg:50.93ms +[2025-09-05 17:34:51] [Rank 0] step:2361/10000 train_time:120253ms step_avg:50.93ms +[2025-09-05 17:34:51] [Rank 0] step:2381/10000 train_time:120990ms step_avg:50.81ms +[2025-09-05 17:34:51] [Rank 0] step:2381/10000 train_time:120990ms step_avg:50.81ms +[2025-09-05 17:34:52] [Rank 0] step:2401/10000 train_time:121729ms step_avg:50.70ms +[2025-09-05 17:34:52] [Rank 0] step:2401/10000 train_time:121729ms step_avg:50.70ms +[2025-09-05 17:34:53] [Rank 0] step:2421/10000 train_time:122467ms step_avg:50.59ms +[2025-09-05 17:34:53] [Rank 0] step:2421/10000 train_time:122467ms step_avg:50.59ms +[2025-09-05 17:34:53] [Rank 0] step:2441/10000 train_time:123206ms step_avg:50.47ms +[2025-09-05 17:34:53] [Rank 0] step:2441/10000 train_time:123206ms step_avg:50.47ms +[2025-09-05 17:34:54] [Rank 0] step:2461/10000 train_time:123944ms step_avg:50.36ms +[2025-09-05 17:34:54] [Rank 0] step:2461/10000 train_time:123944ms step_avg:50.36ms +[2025-09-05 17:34:55] [Rank 0] step:2481/10000 train_time:124682ms step_avg:50.25ms +[2025-09-05 17:34:55] [Rank 0] step:2481/10000 train_time:124682ms step_avg:50.25ms +[2025-09-05 17:34:56] [Rank 0] 
PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:34:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 17:34:56] [Rank 0] PRINT: step:2500/10000 train_loss:2.0723 val_loss:1.9983 train_time:125501ms step_avg:50.20ms +[2025-09-05 17:34:56] [Rank 0] PRINT: step:2500/10000 train_loss:2.0723 val_loss:1.9983 train_time:125501ms step_avg:50.20ms +[2025-09-05 17:34:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:34:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:34:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:34:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:36:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:36:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:36:18] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:36:18] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:36:18] [Rank 0] Total Loss: 4.5949 +[2025-09-05 17:36:18] [Rank 0] Total Loss: 4.5949 +[2025-09-05 17:36:18] [Rank 0] Total FTA (Unweighted): 0.3125 +[2025-09-05 17:36:18] [Rank 0] Total FTA (Unweighted): 0.3125 +[2025-09-05 17:36:18] [Rank 0] Total FTA (Weighted): 0.3125 +[2025-09-05 17:36:18] [Rank 0] Total FTA (Weighted): 0.3125 +[2025-09-05 17:36:18] [Rank 0] Group 0 Loss: 3.2583 +[2025-09-05 17:36:18] [Rank 0] Group 0 Loss: 3.2583 +[2025-09-05 17:36:18] [Rank 0] Group 1 Loss: 3.2588 +[2025-09-05 17:36:18] [Rank 0] Group 1 Loss: 3.2588 +[2025-09-05 17:36:18] [Rank 0] Group 2 Loss: 3.2935 +[2025-09-05 17:36:18] [Rank 0] Group 2 Loss: 3.2935 +[2025-09-05 17:36:18] [Rank 0] Group 3 Loss: 3.6275 +[2025-09-05 17:36:18] [Rank 0] Group 3 Loss: 3.6275 +[2025-09-05 17:36:18] [Rank 0] Group 4 Loss: 3.9728 +[2025-09-05 17:36:18] [Rank 0] Group 4 Loss: 3.9728 +[2025-09-05 17:36:18] [Rank 0] Group 5 Loss: 4.3142 +[2025-09-05 17:36:18] [Rank 0] Group 5 Loss: 4.3142 +[2025-09-05 17:36:18] [Rank 0] Group 6 Loss: 4.5886 +[2025-09-05 17:36:18] [Rank 0] Group 6 Loss: 4.5886 +[2025-09-05 17:36:18] [Rank 0] Group 7 Loss: 4.7816 +[2025-09-05 17:36:18] [Rank 0] Group 7 Loss: 4.7816 +[2025-09-05 17:36:18] [Rank 0] Group 8 Loss: 5.1053 +[2025-09-05 17:36:18] [Rank 0] Group 8 Loss: 5.1053 +[2025-09-05 17:36:18] [Rank 0] Group 9 Loss: 5.2332 +[2025-09-05 17:36:18] [Rank 0] Group 9 Loss: 5.2332 +[2025-09-05 17:36:18] [Rank 0] Group 10 Loss: 5.3536 +[2025-09-05 17:36:18] [Rank 0] Group 10 Loss: 5.3536 +[2025-09-05 17:36:18] [Rank 0] Group 11 Loss: 5.3827 +[2025-09-05 17:36:18] [Rank 0] Group 11 Loss: 5.3827 +[2025-09-05 17:36:18] [Rank 0] Group 12 Loss: 5.3078 +[2025-09-05 17:36:18] [Rank 0] Group 12 Loss: 5.3078 +[2025-09-05 17:36:18] [Rank 0] Group 13 Loss: 5.3268 +[2025-09-05 17:36:18] [Rank 0] Group 13 Loss: 5.3268 +[2025-09-05 17:36:18] [Rank 0] Group 14 Loss: 5.3701 +[2025-09-05 17:36:18] [Rank 0] Group 14 Loss: 5.3701 +[2025-09-05 17:36:18] [Rank 0] Group 15 Loss: 5.3439 +[2025-09-05 17:36:18] [Rank 0] Group 15 Loss: 5.3439 +[2025-09-05 17:36:18] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:36:18] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:36:18] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:36:18] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:36:18] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 17:36:18] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 17:36:19] [Rank 0] Group 3 FTA: 
+[2025-09-05 17:36:19] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 17:36:19] [Rank 0] Group 5 FTA: 0.2500
+[2025-09-05 17:36:19] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 17:36:19] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 17:36:19] [Rank 0] Group 8 FTA: 0.2400
+[2025-09-05 17:36:19] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 17:36:19] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 17:36:19] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 17:36:19] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 17:36:19] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 17:36:19] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 17:36:19] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 17:36:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:36:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:36:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:36:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:36:20] [Rank 0] step:2501/10000 train_time:125512ms step_avg:50.18ms
+[2025-09-05 17:36:21] [Rank 0] step:2521/10000 train_time:126186ms step_avg:50.05ms
+[2025-09-05 17:36:21] [Rank 0] step:2541/10000 train_time:126924ms step_avg:49.95ms
+[2025-09-05 17:36:22] [Rank 0] step:2561/10000 train_time:127665ms step_avg:49.85ms
+[2025-09-05 17:36:23] [Rank 0] step:2581/10000 train_time:128403ms step_avg:49.75ms
+[2025-09-05 17:36:24] [Rank 0] step:2601/10000 train_time:129140ms step_avg:49.65ms
+[2025-09-05 17:36:24] [Rank 0] step:2621/10000 train_time:129878ms step_avg:49.55ms
+[2025-09-05 17:36:25] [Rank 0] step:2641/10000 train_time:130616ms step_avg:49.46ms
+[2025-09-05 17:36:26] [Rank 0] step:2661/10000 train_time:131355ms step_avg:49.36ms
+[2025-09-05 17:36:27] [Rank 0] step:2681/10000 train_time:132092ms step_avg:49.27ms
+[2025-09-05 17:36:27] [Rank 0] step:2701/10000 train_time:132830ms step_avg:49.18ms
+[2025-09-05 17:36:28] [Rank 0] step:2721/10000 train_time:133568ms step_avg:49.09ms
+[2025-09-05 17:36:29] [Rank 0] step:2741/10000 train_time:134306ms step_avg:49.00ms
+[2025-09-05 17:36:30] [Rank 0] step:2761/10000 train_time:135044ms step_avg:48.91ms
+[2025-09-05 17:36:30] [Rank 0] step:2781/10000 train_time:135783ms step_avg:48.83ms
+[2025-09-05 17:36:31] [Rank 0] step:2801/10000 train_time:136520ms step_avg:48.74ms
+[2025-09-05 17:36:32] [Rank 0] step:2821/10000 train_time:137455ms step_avg:48.73ms
+[2025-09-05 17:36:33] [Rank 0] step:2841/10000 train_time:138193ms step_avg:48.64ms
+[2025-09-05 17:36:33] [Rank 0] step:2861/10000 train_time:138931ms step_avg:48.56ms
+[2025-09-05 17:36:34] [Rank 0] step:2881/10000 train_time:139668ms step_avg:48.48ms
+[2025-09-05 17:36:35] [Rank 0] step:2901/10000 train_time:140407ms step_avg:48.40ms
+[2025-09-05 17:36:36] [Rank 0] step:2921/10000 train_time:141144ms step_avg:48.32ms
+[2025-09-05 17:36:36] [Rank 0] step:2941/10000 train_time:141882ms step_avg:48.24ms
+[2025-09-05 17:36:37] [Rank 0] step:2961/10000 train_time:142620ms step_avg:48.17ms
+[2025-09-05 17:36:38] [Rank 0] step:2981/10000 train_time:143358ms step_avg:48.09ms
+[2025-09-05 17:36:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:36:39] [Rank 0] PRINT: step:3000/10000 train_loss:1.9569 val_loss:1.9045 train_time:144177ms step_avg:48.06ms
+[2025-09-05 17:36:39] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:36:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:38:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:38:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:38:02] [Rank 0] Total Loss: 4.5670
+[2025-09-05 17:38:02] [Rank 0] Total FTA (Unweighted): 0.3506
+[2025-09-05 17:38:02] [Rank 0] Total FTA (Weighted): 0.3506
+[2025-09-05 17:38:02] [Rank 0] Group 0 Loss: 3.2990
+[2025-09-05 17:38:02] [Rank 0] Group 1 Loss: 3.3396
+[2025-09-05 17:38:02] [Rank 0] Group 2 Loss: 3.3657
+[2025-09-05 17:38:02] [Rank 0] Group 3 Loss: 3.6228
+[2025-09-05 17:38:02] [Rank 0] Group 4 Loss: 3.9345
+[2025-09-05 17:38:02] [Rank 0] Group 5 Loss: 4.2338
+[2025-09-05 17:38:02] [Rank 0] Group 6 Loss: 4.5459
+[2025-09-05 17:38:02] [Rank 0] Group 7 Loss: 4.7399
+[2025-09-05 17:38:02] [Rank 0] Group 8 Loss: 5.0349
+[2025-09-05 17:38:02] [Rank 0] Group 9 Loss: 5.1769
+[2025-09-05 17:38:02] [Rank 0] Group 10 Loss: 5.2855
+[2025-09-05 17:38:02] [Rank 0] Group 11 Loss: 5.3550
+[2025-09-05 17:38:02] [Rank 0] Group 12 Loss: 5.2317
+[2025-09-05 17:38:02] [Rank 0] Group 13 Loss: 5.2996
+[2025-09-05 17:38:02] [Rank 0] Group 14 Loss: 5.3103
+[2025-09-05 17:38:02] [Rank 0] Group 15 Loss: 5.2974
+[2025-09-05 17:38:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:38:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:38:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:38:02] [Rank 0] Group 3 FTA: 0.3000
+[2025-09-05 17:38:02] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 17:38:02] [Rank 0] Group 5 FTA: 0.3400
+[2025-09-05 17:38:02] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 17:38:02] [Rank 0] Group 7 FTA: 0.1900
+[2025-09-05 17:38:02] [Rank 0] Group 8 FTA: 0.2400
+[2025-09-05 17:38:02] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 17:38:02] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 17:38:02] [Rank 0] Group 11 FTA: 0.1700
+[2025-09-05 17:38:02] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 17:38:02] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 17:38:02] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 17:38:02] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 17:38:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:38:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:38:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:38:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:38:03] [Rank 0] step:3001/10000 train_time:144188ms step_avg:48.05ms
+[2025-09-05 17:38:04] [Rank 0] step:3021/10000 train_time:144861ms step_avg:47.95ms
+[2025-09-05 17:38:05] [Rank 0] step:3041/10000 train_time:145598ms step_avg:47.88ms
+[2025-09-05 17:38:05] [Rank 0] step:3061/10000 train_time:146337ms step_avg:47.81ms
+[2025-09-05 17:38:06] [Rank 0] step:3081/10000 train_time:147076ms step_avg:47.74ms
+[2025-09-05 17:38:07] [Rank 0] step:3101/10000 train_time:147813ms step_avg:47.67ms
+[2025-09-05 17:38:08] [Rank 0] step:3121/10000 train_time:148552ms step_avg:47.60ms
+[2025-09-05 17:38:08] [Rank 0] step:3141/10000 train_time:149291ms step_avg:47.53ms
+[2025-09-05 17:38:09] [Rank 0] step:3161/10000 train_time:150029ms step_avg:47.46ms
+[2025-09-05 17:38:10] [Rank 0] step:3181/10000 train_time:150767ms step_avg:47.40ms
+[2025-09-05 17:38:11] [Rank 0] step:3201/10000 train_time:151505ms step_avg:47.33ms
+[2025-09-05 17:38:11] [Rank 0] step:3221/10000 train_time:152243ms step_avg:47.27ms
+[2025-09-05 17:38:12] [Rank 0] step:3241/10000 train_time:152981ms step_avg:47.20ms
+[2025-09-05 17:38:13] [Rank 0] step:3261/10000 train_time:153719ms step_avg:47.14ms
+[2025-09-05 17:38:13] [Rank 0] step:3281/10000 train_time:154457ms step_avg:47.08ms
+[2025-09-05 17:38:14] [Rank 0] step:3301/10000 train_time:155195ms step_avg:47.01ms
+[2025-09-05 17:38:15] [Rank 0] step:3321/10000 train_time:155932ms step_avg:46.95ms
+[2025-09-05 17:38:16] [Rank 0] step:3341/10000 train_time:156671ms step_avg:46.89ms
+[2025-09-05 17:38:16] [Rank 0] step:3361/10000 train_time:157409ms step_avg:46.83ms
+[2025-09-05 17:38:17] [Rank 0] step:3381/10000 train_time:158147ms step_avg:46.78ms
+[2025-09-05 17:38:18] [Rank 0] step:3401/10000 train_time:158886ms step_avg:46.72ms
+[2025-09-05 17:38:19] [Rank 0] step:3421/10000 train_time:159624ms step_avg:46.66ms
+[2025-09-05 17:38:19] [Rank 0] step:3441/10000 train_time:160361ms step_avg:46.60ms
+[2025-09-05 17:38:20] [Rank 0] step:3461/10000 train_time:161099ms step_avg:46.55ms
+[2025-09-05 17:38:21] [Rank 0] step:3481/10000 train_time:161837ms step_avg:46.49ms
+[2025-09-05 17:38:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:38:22] [Rank 0] PRINT: step:3500/10000 train_loss:1.8811 val_loss:1.8427 train_time:162717ms step_avg:46.49ms
+[2025-09-05 17:38:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:38:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:39:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:39:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:39:44] [Rank 0] Total Loss: 4.5391
+[2025-09-05 17:39:44] [Rank 0] Total FTA (Unweighted): 0.3731
+[2025-09-05 17:39:44] [Rank 0] Total FTA (Weighted): 0.3731
+[2025-09-05 17:39:44] [Rank 0] Group 0 Loss: 3.3217
+[2025-09-05 17:39:44] [Rank 0] Group 1 Loss: 3.4327
+[2025-09-05 17:39:44] [Rank 0] Group 2 Loss: 3.3351
+[2025-09-05 17:39:44] [Rank 0] Group 3 Loss: 3.5646
+[2025-09-05 17:39:44] [Rank 0] Group 4 Loss: 3.8994
+[2025-09-05 17:39:44] [Rank 0] Group 5 Loss: 4.1820
+[2025-09-05 17:39:44] [Rank 0] Group 6 Loss: 4.5237
+[2025-09-05 17:39:44] [Rank 0] Group 7 Loss: 4.6754
+[2025-09-05 17:39:44] [Rank 0] Group 8 Loss: 4.9975
+[2025-09-05 17:39:44] [Rank 0] Group 9 Loss: 5.0986
+[2025-09-05 17:39:44] [Rank 0] Group 10 Loss: 5.2595
+[2025-09-05 17:39:44] [Rank 0] Group 11 Loss: 5.3102
+[2025-09-05 17:39:44] [Rank 0] Group 12 Loss: 5.1806
+[2025-09-05 17:39:44] [Rank 0] Group 13 Loss: 5.2828
+[2025-09-05 17:39:44] [Rank 0] Group 14 Loss: 5.2942
+[2025-09-05 17:39:44] [Rank 0] Group 15 Loss: 5.2683
+[2025-09-05 17:39:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:39:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:39:44] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:39:44] [Rank 0] Group 3 FTA: 0.3400
+[2025-09-05 17:39:44] [Rank 0] Group 4 FTA: 0.2900
+[2025-09-05 17:39:44] [Rank 0] Group 5 FTA: 0.3600
+[2025-09-05 17:39:44] [Rank 0] Group 6 FTA: 0.3400
+[2025-09-05 17:39:44] [Rank 0] Group 7 FTA: 0.2300
+[2025-09-05 17:39:44] [Rank 0] Group 8 FTA: 0.2800
+[2025-09-05 17:39:44] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 17:39:44] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 17:39:44] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 17:39:44] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 17:39:44] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 17:39:44] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 17:39:44] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 17:39:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:39:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:39:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:39:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:39:45] [Rank 0] step:3501/10000 train_time:162728ms step_avg:46.48ms
+[2025-09-05 17:39:46] [Rank 0] step:3521/10000 train_time:163391ms step_avg:46.40ms
+[2025-09-05 17:39:47] [Rank 0] step:3541/10000 train_time:164130ms step_avg:46.35ms
+[2025-09-05 17:39:48] [Rank 0] step:3561/10000 train_time:164868ms step_avg:46.30ms
+[2025-09-05 17:39:48] [Rank 0] step:3581/10000 train_time:165606ms step_avg:46.25ms
+[2025-09-05 17:39:49] [Rank 0] step:3601/10000 train_time:166344ms step_avg:46.19ms
+[2025-09-05 17:39:50] [Rank 0] step:3621/10000 train_time:167080ms step_avg:46.14ms
+[2025-09-05 17:39:51] [Rank 0] step:3641/10000 train_time:168431ms step_avg:46.26ms
+[2025-09-05 17:39:52] [Rank 0] step:3661/10000 train_time:169168ms step_avg:46.21ms
+[2025-09-05 17:39:53] [Rank 0] step:3681/10000 train_time:169906ms step_avg:46.16ms
+[2025-09-05 17:39:53] [Rank 0] step:3701/10000 train_time:170643ms step_avg:46.11ms
+[2025-09-05 17:39:54] [Rank 0] step:3721/10000 train_time:171381ms step_avg:46.06ms
+[2025-09-05 17:39:55] [Rank 0] step:3741/10000 train_time:172119ms step_avg:46.01ms
+[2025-09-05 17:39:56] [Rank 0] step:3761/10000 train_time:172857ms step_avg:45.96ms
+[2025-09-05 17:39:56] [Rank 0] step:3781/10000 train_time:173593ms step_avg:45.91ms
+[2025-09-05 17:39:57] [Rank 0] step:3801/10000 train_time:174331ms step_avg:45.86ms
+[2025-09-05 17:39:58] [Rank 0] step:3821/10000 train_time:175068ms step_avg:45.82ms
+[2025-09-05 17:39:59] [Rank 0] step:3841/10000 train_time:175806ms step_avg:45.77ms
+[2025-09-05 17:39:59] [Rank 0] step:3861/10000 train_time:176544ms step_avg:45.72ms
+[2025-09-05 17:40:00] [Rank 0] step:3881/10000 train_time:177283ms step_avg:45.68ms
+[2025-09-05 17:40:01] [Rank 0] step:3901/10000 train_time:178021ms step_avg:45.63ms
+[2025-09-05 17:40:02] [Rank 0] step:3921/10000 train_time:178760ms step_avg:45.59ms
+[2025-09-05 17:40:02] [Rank 0] step:3941/10000 train_time:179498ms step_avg:45.55ms
+[2025-09-05 17:40:03] [Rank 0] step:3961/10000 train_time:180235ms step_avg:45.50ms
+[2025-09-05 17:40:04] [Rank 0] step:3981/10000 train_time:180974ms step_avg:45.46ms
+[2025-09-05 17:40:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:40:05] [Rank 0] PRINT: step:4000/10000 train_loss:1.8290 val_loss:1.7992 train_time:181792ms step_avg:45.45ms
+[2025-09-05 17:40:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:40:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:41:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:41:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:41:27] [Rank 0] Total Loss: 4.4575
+[2025-09-05 17:41:27] [Rank 0] Total FTA (Unweighted): 0.4012
+[2025-09-05 17:41:27] [Rank 0] Total FTA (Weighted): 0.4012
+[2025-09-05 17:41:27] [Rank 0] Group 0 Loss: 3.3369
+[2025-09-05 17:41:27] [Rank 0] Group 1 Loss: 3.3195
+[2025-09-05 17:41:27] [Rank 0] Group 2 Loss: 3.2777
+[2025-09-05 17:41:27] [Rank 0] Group 3 Loss: 3.5929
+[2025-09-05 17:41:27] [Rank 0] Group 4 Loss: 3.8561
+[2025-09-05 17:41:27] [Rank 0] Group 5 Loss: 4.0870
+[2025-09-05 17:41:27] [Rank 0] Group 6 Loss: 4.4106
+[2025-09-05 17:41:27] [Rank 0] Group 7 Loss: 4.5630
+[2025-09-05 17:41:27] [Rank 0] Group 8 Loss: 4.8856
+[2025-09-05 17:41:27] [Rank 0] Group 9 Loss: 5.0186
+[2025-09-05 17:41:27] [Rank 0] Group 10 Loss: 5.1313
+[2025-09-05 17:41:27] [Rank 0] Group 11 Loss: 5.2013
+[2025-09-05 17:41:27] [Rank 0] Group 12 Loss: 5.1045
+[2025-09-05 17:41:27] [Rank 0] Group 13 Loss: 5.1565
+[2025-09-05 17:41:27] [Rank 0] Group 14 Loss: 5.1870
+[2025-09-05 17:41:27] [Rank 0] Group 15 Loss: 5.1918
+[2025-09-05 17:41:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:41:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:41:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:41:27] [Rank 0] Group 3 FTA: 0.4400
+[2025-09-05 17:41:27] [Rank 0] Group 4 FTA: 0.3300
+[2025-09-05 17:41:27] [Rank 0] Group 5 FTA: 0.4300
+[2025-09-05 17:41:27] [Rank 0] Group 6 FTA: 0.3400
+[2025-09-05 17:41:27] [Rank 0] Group 7 FTA: 0.2600
+[2025-09-05 17:41:27] [Rank 0] Group 8 FTA: 0.3400
+[2025-09-05 17:41:27] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 17:41:27] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 17:41:27] [Rank 0] Group 11 FTA: 0.2300
+[2025-09-05 17:41:27] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 17:41:27] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 17:41:27] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 17:41:27] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:41:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:41:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:41:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:41:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:41:28] [Rank 0] step:4001/10000 train_time:181802ms step_avg:45.44ms
+[2025-09-05 17:41:30] [Rank 0] step:4021/10000 train_time:183100ms step_avg:45.54ms
+[2025-09-05 17:41:30] [Rank 0] step:4041/10000 train_time:183837ms step_avg:45.49ms
+[2025-09-05 17:41:31] [Rank 0] step:4061/10000 train_time:184693ms step_avg:45.48ms
+[2025-09-05 17:41:32] [Rank 0] step:4081/10000 train_time:185492ms step_avg:45.45ms
+[2025-09-05 17:41:33] [Rank 0] step:4101/10000 train_time:186230ms step_avg:45.41ms
+[2025-09-05 17:41:34] [Rank 0] step:4121/10000 train_time:186967ms step_avg:45.37ms
+[2025-09-05 17:41:34] [Rank 0] step:4141/10000 train_time:187820ms step_avg:45.36ms
+[2025-09-05 17:41:35] [Rank 0] step:4161/10000 train_time:188558ms step_avg:45.32ms
+[2025-09-05 17:41:36] [Rank 0] step:4181/10000 train_time:189296ms step_avg:45.28ms
+[2025-09-05 17:41:37] [Rank 0] step:4201/10000 train_time:190034ms step_avg:45.24ms
+[2025-09-05 17:41:37] [Rank 0] step:4221/10000 train_time:190772ms step_avg:45.20ms
+[2025-09-05 17:41:38] [Rank 0] step:4241/10000 train_time:191509ms step_avg:45.16ms
+[2025-09-05 17:41:39] [Rank 0] step:4261/10000 train_time:192247ms step_avg:45.12ms
+[2025-09-05 17:41:40] [Rank 0] step:4281/10000 train_time:192985ms step_avg:45.08ms
+[2025-09-05 17:41:40] [Rank 0] step:4301/10000 train_time:193723ms step_avg:45.04ms
+[2025-09-05 17:41:41] [Rank 0] step:4321/10000 train_time:194462ms step_avg:45.00ms
+[2025-09-05 17:41:42] [Rank 0] step:4341/10000 train_time:195200ms step_avg:44.97ms
+[2025-09-05 17:41:42] [Rank 0] step:4361/10000 train_time:195939ms step_avg:44.93ms
+[2025-09-05 17:41:43] [Rank 0] step:4381/10000 train_time:196677ms step_avg:44.89ms
+[2025-09-05 17:41:44] [Rank 0] step:4401/10000 train_time:197414ms step_avg:44.86ms
+[2025-09-05 17:41:45] [Rank 0] step:4421/10000 train_time:198153ms step_avg:44.82ms
+[2025-09-05 17:41:45] [Rank 0] step:4441/10000 train_time:198891ms step_avg:44.79ms
+[2025-09-05 17:41:46] [Rank 0] step:4461/10000 train_time:199629ms step_avg:44.75ms
+[2025-09-05 17:41:47] [Rank 0] step:4481/10000 train_time:200367ms step_avg:44.71ms
+[2025-09-05 17:41:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:41:48] [Rank 0] PRINT: step:4500/10000 train_loss:1.7906 val_loss:1.7649 train_time:201186ms step_avg:44.71ms
+[2025-09-05 17:41:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:41:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:43:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:43:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:43:09] [Rank 0] Total Loss: 4.4214
+[2025-09-05 17:43:09] [Rank 0] Total FTA (Unweighted): 0.4156
+[2025-09-05 17:43:09] [Rank 0] Total FTA (Weighted): 0.4156
+[2025-09-05 17:43:09] [Rank 0] Group 0 Loss: 3.3422
+[2025-09-05 17:43:09] [Rank 0] Group 1 Loss: 3.2463
+[2025-09-05 17:43:09] [Rank 0] Group 2 Loss: 3.3097
+[2025-09-05 17:43:09] [Rank 0] Group 3 Loss: 3.5576
+[2025-09-05 17:43:09] [Rank 0] Group 4 Loss: 3.8225
+[2025-09-05 17:43:09] [Rank 0] Group 5 Loss: 4.0779
+[2025-09-05 17:43:09] [Rank 0] Group 6 Loss: 4.3380
+[2025-09-05 17:43:09] [Rank 0] Group 7 Loss: 4.5682
+[2025-09-05 17:43:09] [Rank 0] Group 8 Loss: 4.8412
+[2025-09-05 17:43:09] [Rank 0] Group 9 Loss: 4.9620
+[2025-09-05 17:43:09] [Rank 0] Group 10 Loss: 5.0722
+[2025-09-05 17:43:09] [Rank 0] Group 11 Loss: 5.1343
+[2025-09-05 17:43:10] [Rank 0] Group 12 Loss: 5.0731
+[2025-09-05 17:43:10] [Rank 0] Group 13 Loss: 5.0863
+[2025-09-05 17:43:10] [Rank 0] Group 14 Loss: 5.1676
+[2025-09-05 17:43:10] [Rank 0] Group 15 Loss: 5.1425
+[2025-09-05 17:43:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:43:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:43:10] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:43:10] [Rank 0] Group 3 FTA: 0.4800
+[2025-09-05 17:43:10] [Rank 0] Group 4 FTA: 0.3700
+[2025-09-05 17:43:10] [Rank 0] Group 5 FTA: 0.4500
+[2025-09-05 17:43:10] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 17:43:10] [Rank 0] Group 7 FTA: 0.2900
+[2025-09-05 17:43:10] [Rank 0] Group 8 FTA: 0.3300
+[2025-09-05 17:43:10] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 17:43:10] [Rank 0] Group 10 FTA: 0.2700
+[2025-09-05 17:43:10] [Rank 0] Group 11 FTA: 0.2600
+[2025-09-05 17:43:10] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 17:43:10] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 17:43:10] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 17:43:10] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:43:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:43:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:43:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:43:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:43:11] [Rank 0] step:4501/10000 train_time:201196ms step_avg:44.70ms
+[2025-09-05 17:43:12] [Rank 0] step:4521/10000 train_time:201863ms step_avg:44.65ms
+[2025-09-05 17:43:13] [Rank 0] step:4541/10000 train_time:202601ms step_avg:44.62ms
+[2025-09-05 17:43:13] [Rank 0] step:4561/10000 train_time:203338ms step_avg:44.58ms
+[2025-09-05 17:43:14] [Rank 0] step:4581/10000 train_time:204076ms step_avg:44.55ms
+[2025-09-05 17:43:15] [Rank 0] step:4601/10000 train_time:204814ms step_avg:44.52ms
+[2025-09-05 17:43:16] [Rank 0] step:4621/10000 train_time:205551ms step_avg:44.48ms
+[2025-09-05 17:43:16] [Rank 0] step:4641/10000 train_time:206289ms step_avg:44.45ms
+[2025-09-05 17:43:17] [Rank 0] step:4661/10000 train_time:207027ms step_avg:44.42ms
+[2025-09-05 17:43:18] [Rank 0] step:4681/10000 train_time:207764ms step_avg:44.38ms
+[2025-09-05 17:43:18] [Rank 0] step:4701/10000 train_time:208502ms step_avg:44.35ms
+[2025-09-05 17:43:19] [Rank 0] step:4721/10000 train_time:209240ms step_avg:44.32ms
+[2025-09-05 17:43:20] [Rank 0] step:4741/10000 train_time:209978ms step_avg:44.29ms
+[2025-09-05 17:43:21] [Rank 0] step:4761/10000 train_time:210716ms step_avg:44.26ms
+[2025-09-05 17:43:21] [Rank 0] step:4781/10000 train_time:211455ms step_avg:44.23ms
+[2025-09-05 17:43:22] [Rank 0] step:4801/10000 train_time:212193ms step_avg:44.20ms
+[2025-09-05 17:43:23] [Rank 0] step:4821/10000 train_time:212930ms step_avg:44.17ms
+[2025-09-05 17:43:24] [Rank 0] step:4841/10000 train_time:213975ms step_avg:44.20ms
+[2025-09-05 17:43:25] [Rank 0] step:4861/10000 train_time:214711ms step_avg:44.17ms
+[2025-09-05 17:43:25] [Rank 0] step:4881/10000 train_time:215449ms step_avg:44.14ms
+[2025-09-05 17:43:26] [Rank 0] step:4901/10000 train_time:216186ms step_avg:44.11ms
+[2025-09-05 17:43:27] [Rank 0] step:4921/10000 train_time:216923ms step_avg:44.08ms
+[2025-09-05 17:43:28] [Rank 0] step:4941/10000 train_time:217661ms step_avg:44.05ms
+[2025-09-05 17:43:28] [Rank 0] step:4961/10000 train_time:218398ms step_avg:44.02ms
+[2025-09-05 17:43:29] [Rank 0] step:4981/10000 train_time:219136ms step_avg:43.99ms
+[2025-09-05 17:43:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:43:30] [Rank 0] PRINT: step:5000/10000 train_loss:1.7599 val_loss:1.7385 train_time:219954ms step_avg:43.99ms
+[2025-09-05 17:43:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:43:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:44:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:44:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:44:52] [Rank 0] Total Loss: 4.3508
+[2025-09-05 17:44:52] [Rank 0] Total FTA (Unweighted): 0.4375
+[2025-09-05 17:44:52] [Rank 0] Total FTA (Weighted): 0.4375
+[2025-09-05 17:44:52] [Rank 0] Group 0 Loss: 3.3358
+[2025-09-05 17:44:52] [Rank 0] Group 1 Loss: 3.2225
+[2025-09-05 17:44:52] [Rank 0] Group 2 Loss: 3.2728
+[2025-09-05 17:44:52] [Rank 0] Group 3 Loss: 3.4870
+[2025-09-05 17:44:52] [Rank 0] Group 4 Loss: 3.7651
+[2025-09-05 17:44:52] [Rank 0] Group 5 Loss: 4.0357
+[2025-09-05 17:44:52] [Rank 0] Group 6 Loss: 4.2558
+[2025-09-05 17:44:52] [Rank 0] Group 7 Loss: 4.4549
+[2025-09-05 17:44:52] [Rank 0] Group 8 Loss: 4.7798
+[2025-09-05 17:44:52] [Rank 0] Group 9 Loss: 4.9043
+[2025-09-05 17:44:52] [Rank 0] Group 10 Loss: 5.0192
+[2025-09-05 17:44:52] [Rank 0] Group 11 Loss: 5.0552
+[2025-09-05 17:44:52] [Rank 0] Group 12 Loss: 4.9688
+[2025-09-05 17:44:52] [Rank 0] Group 13 Loss: 4.9991
+[2025-09-05 17:44:52] [Rank 0] Group 14 Loss: 5.0511
+[2025-09-05 17:44:52] [Rank 0] Group 15 Loss: 5.0058
+[2025-09-05 17:44:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:44:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:44:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:44:52] [Rank 0] Group 3 FTA: 0.5700
+[2025-09-05 17:44:52] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 17:44:52] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 17:44:52] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 17:44:52] [Rank 0] Group 7 FTA: 0.3000
+[2025-09-05 17:44:52] [Rank 0] Group 8 FTA: 0.3700
+[2025-09-05 17:44:52] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 17:44:52] [Rank 0] Group 10 FTA: 0.3000
+[2025-09-05 17:44:52] [Rank 0] Group 11 FTA: 0.2800
+[2025-09-05 17:44:52] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 17:44:52] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 17:44:52] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 17:44:52] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:44:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:44:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:44:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:44:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:44:53] [Rank 0] step:5001/10000 train_time:219965ms step_avg:43.98ms
+[2025-09-05 17:44:54] [Rank 0] step:5021/10000 train_time:220646ms step_avg:43.94ms
+[2025-09-05 17:44:55] [Rank 0] step:5041/10000 train_time:221384ms step_avg:43.92ms
+[2025-09-05 17:44:56] [Rank 0] step:5061/10000 train_time:222122ms step_avg:43.89ms
+[2025-09-05 17:44:56] [Rank 0] step:5081/10000 train_time:222860ms step_avg:43.86ms
+[2025-09-05 17:44:57] [Rank 0] step:5101/10000 train_time:223599ms step_avg:43.83ms
+[2025-09-05 17:44:58] [Rank 0] step:5121/10000 train_time:224337ms step_avg:43.81ms
+[2025-09-05 17:44:59] [Rank 0] step:5141/10000 train_time:225076ms step_avg:43.78ms
+[2025-09-05 17:44:59] [Rank 0] step:5161/10000 train_time:225815ms step_avg:43.75ms
+[2025-09-05 17:45:00] [Rank 0] step:5181/10000 train_time:226553ms step_avg:43.73ms
+[2025-09-05 17:45:01] [Rank 0] step:5201/10000 train_time:227291ms step_avg:43.70ms
+[2025-09-05 17:45:02] [Rank 0] step:5221/10000 train_time:228031ms step_avg:43.68ms
+[2025-09-05 17:45:02] [Rank 0] step:5241/10000 train_time:228770ms step_avg:43.65ms
+[2025-09-05 17:45:03] [Rank 0] step:5261/10000 train_time:229510ms step_avg:43.62ms
+[2025-09-05 17:45:04] [Rank 0] step:5281/10000 train_time:230248ms step_avg:43.60ms
+[2025-09-05 17:45:05] [Rank 0] step:5301/10000 train_time:230986ms step_avg:43.57ms
+[2025-09-05 17:45:05] [Rank 0] step:5321/10000 train_time:231724ms step_avg:43.55ms
+[2025-09-05 17:45:06] [Rank 0] step:5341/10000 train_time:232463ms step_avg:43.52ms
+[2025-09-05 17:45:07] [Rank 0] step:5361/10000 train_time:233201ms step_avg:43.50ms
+[2025-09-05 17:45:08] [Rank 0] step:5381/10000 train_time:233939ms step_avg:43.47ms
+[2025-09-05 17:45:08] [Rank 0] step:5401/10000 train_time:234677ms step_avg:43.45ms
+[2025-09-05 17:45:09] [Rank 0] step:5421/10000 train_time:235415ms step_avg:43.43ms
+[2025-09-05 17:45:10] [Rank 0] step:5441/10000 train_time:236152ms step_avg:43.40ms
+[2025-09-05 17:45:10] [Rank 0] step:5461/10000 train_time:236890ms step_avg:43.38ms
+[2025-09-05 17:45:11] [Rank 0] step:5481/10000 train_time:237627ms step_avg:43.35ms
+[2025-09-05 17:45:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:45:12] [Rank 0] PRINT: step:5500/10000 train_loss:1.7355 val_loss:1.7182 train_time:238446ms step_avg:43.35ms
+[2025-09-05 17:45:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:45:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:46:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:46:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:46:34] [Rank 0] Total Loss: 4.3547
+[2025-09-05 17:46:34] [Rank 0] Total FTA (Unweighted): 0.4263
+[2025-09-05 17:46:34] [Rank 0] Total FTA (Weighted): 0.4263
+[2025-09-05 17:46:34] [Rank 0] Group 0 Loss: 3.3077
+[2025-09-05 17:46:34] [Rank 0] Group 1 Loss: 3.1373
+[2025-09-05 17:46:34] [Rank 0] Group 2 Loss: 3.2376
+[2025-09-05 17:46:34] [Rank 0] Group 3 Loss: 3.5037
+[2025-09-05 17:46:34] [Rank 0] Group 4 Loss: 3.7976
+[2025-09-05 17:46:34] [Rank 0] Group 5 Loss: 4.0611
+[2025-09-05 17:46:34] [Rank 0] Group 6 Loss: 4.2674
+[2025-09-05 17:46:34] [Rank 0] Group 7 Loss: 4.4604
+[2025-09-05 17:46:34] [Rank 0] Group 8 Loss: 4.8261
+[2025-09-05 17:46:34] [Rank 0] Group 9 Loss: 4.9190
+[2025-09-05 17:46:34] [Rank 0] Group 10 Loss: 5.0577
+[2025-09-05 17:46:34] [Rank 0] Group 11 Loss: 5.0673
+[2025-09-05 17:46:34] [Rank 0] Group 12 Loss: 4.9798
+[2025-09-05 17:46:34] [Rank 0] Group 13 Loss: 4.9953
+[2025-09-05 17:46:34] [Rank 0] Group 14 Loss: 5.0524
+[2025-09-05 17:46:34] [Rank 0] Group 15 Loss: 5.0052
+[2025-09-05 17:46:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:46:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:46:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:46:34] [Rank 0] Group 3 FTA: 0.5700
+[2025-09-05 17:46:34] [Rank 0] Group 4 FTA: 0.4100
+[2025-09-05 17:46:34] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 17:46:34] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 17:46:34] [Rank 0] Group 7 FTA: 0.3100
+[2025-09-05 17:46:34] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 17:46:34] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 17:46:34] [Rank 0] Group 10 FTA: 0.2700
+[2025-09-05 17:46:34] [Rank 0] Group 11 FTA: 0.2600
+[2025-09-05 17:46:34] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 17:46:34] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 17:46:34] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 17:46:34] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:46:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:46:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:46:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:46:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:46:35] [Rank 0] step:5501/10000 train_time:238456ms step_avg:43.35ms
+[2025-09-05 17:46:36] [Rank 0] step:5521/10000 train_time:239130ms step_avg:43.31ms
+[2025-09-05 17:46:37] [Rank 0] step:5541/10000 train_time:239868ms step_avg:43.29ms
+[2025-09-05 17:46:37] [Rank 0] step:5561/10000 train_time:240607ms step_avg:43.27ms
+[2025-09-05 17:46:38] [Rank 0] step:5581/10000 train_time:241345ms step_avg:43.24ms
+[2025-09-05 17:46:39] [Rank 0] step:5601/10000 train_time:242083ms step_avg:43.22ms
+[2025-09-05 17:46:40] [Rank 0] step:5621/10000 train_time:242821ms step_avg:43.20ms
+[2025-09-05 17:46:41] [Rank 0] step:5641/10000 train_time:244158ms step_avg:43.28ms
+[2025-09-05 17:46:42] [Rank 0] step:5661/10000 train_time:244895ms step_avg:43.26ms
+[2025-09-05 17:46:43] [Rank 0] step:5681/10000 train_time:245633ms step_avg:43.24ms
+[2025-09-05 17:46:43] [Rank 0] step:5701/10000 train_time:246372ms step_avg:43.22ms
+[2025-09-05 17:46:44] [Rank 0] step:5721/10000 train_time:247110ms step_avg:43.19ms
+[2025-09-05 17:46:45] [Rank 0] step:5741/10000 train_time:247848ms step_avg:43.17ms
+[2025-09-05 17:46:45] [Rank 0] step:5761/10000 train_time:248586ms step_avg:43.15ms
+[2025-09-05 17:46:46] [Rank 0] step:5781/10000 train_time:249324ms step_avg:43.13ms
+[2025-09-05 17:46:47] [Rank 0] step:5801/10000 train_time:250062ms step_avg:43.11ms
+[2025-09-05 17:46:48] [Rank 0] step:5821/10000 train_time:250930ms step_avg:43.11ms
+[2025-09-05 17:46:49] [Rank 0] step:5841/10000 train_time:251669ms step_avg:43.09ms
+[2025-09-05 17:46:49] [Rank 0] step:5861/10000 train_time:252407ms step_avg:43.07ms
+[2025-09-05 17:46:50] [Rank 0] step:5881/10000 train_time:253278ms step_avg:43.07ms
+[2025-09-05 17:46:51] [Rank 0] step:5901/10000 train_time:254016ms step_avg:43.05ms
+[2025-09-05 17:46:52] [Rank 0] step:5921/10000 train_time:254755ms step_avg:43.03ms
+[2025-09-05 17:46:52] [Rank 0] step:5941/10000 train_time:255493ms step_avg:43.01ms
+[2025-09-05 17:46:53] [Rank 0] step:5961/10000 train_time:256232ms step_avg:42.98ms
+[2025-09-05 17:46:54] [Rank 0] step:5981/10000 train_time:256970ms step_avg:42.96ms
+[2025-09-05 17:46:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:46:55] [Rank 0] PRINT: step:6000/10000 train_loss:1.7164 val_loss:1.6984 train_time:257789ms step_avg:42.96ms
+[2025-09-05 17:46:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:46:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:48:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:48:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:48:17] [Rank 0] Total Loss: 4.3619
+[2025-09-05 17:48:17] [Rank 0] Total FTA (Unweighted): 0.4381
+[2025-09-05 17:48:17] [Rank 0] Total FTA (Weighted): 0.4381
+[2025-09-05 17:48:17] [Rank 0] Group 0 Loss: 3.4310
+[2025-09-05 17:48:17] [Rank 0] Group 1 Loss: 3.2623
+[2025-09-05 17:48:17] [Rank 0] Group 2 Loss: 3.2616
+[2025-09-05 17:48:17] [Rank 0] Group 3 Loss: 3.5121
+[2025-09-05 17:48:17] [Rank 0] Group 4 Loss: 3.8164
+[2025-09-05 17:48:17] [Rank 0] Group 5 Loss: 4.0595
+[2025-09-05 17:48:17] [Rank 0] Group 6 Loss: 4.2778
+[2025-09-05 17:48:17] [Rank 0] Group 7 Loss: 4.4490
+[2025-09-05 17:48:17] [Rank 0] Group 8 Loss: 4.7837
+[2025-09-05 17:48:17] [Rank 0] Group 9 Loss: 4.8900
+[2025-09-05 17:48:17] [Rank 0] Group 10 Loss: 4.9956
+[2025-09-05 17:48:17] [Rank 0] Group 11 Loss: 5.0704
+[2025-09-05 17:48:17] [Rank 0] Group 12 Loss: 4.9551
+[2025-09-05 17:48:17] [Rank 0] Group 13 Loss: 4.9909
+[2025-09-05 17:48:17] [Rank 0] Group 14 Loss: 5.0385
+[2025-09-05 17:48:17] [Rank 0] Group 15 Loss: 4.9961
+[2025-09-05 17:48:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:48:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:48:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:48:17] [Rank 0] Group 3 FTA: 0.5700
+[2025-09-05 17:48:17] [Rank 0] Group 4 FTA: 0.3900
+[2025-09-05 17:48:17] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 17:48:17] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 17:48:17] [Rank 0] Group 7 FTA: 0.3100
+[2025-09-05 17:48:17] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 17:48:17] [Rank 0] Group 9 FTA: 0.2600
+[2025-09-05 17:48:17] [Rank 0] Group 10 FTA: 0.3400
+[2025-09-05 17:48:17] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 17:48:17] [Rank 0] Group 12 FTA: 0.2100
+[2025-09-05 17:48:17] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 17:48:17] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 17:48:17] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 17:48:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:48:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:48:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:48:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:48:19] [Rank 0] step:6001/10000 train_time:257799ms step_avg:42.96ms
+[2025-09-05 17:48:20] [Rank 0] step:6021/10000 train_time:259097ms step_avg:43.03ms
+[2025-09-05 17:48:21] [Rank 0] step:6041/10000 train_time:259836ms step_avg:43.01ms
+[2025-09-05 17:48:21] [Rank 0] step:6061/10000 train_time:260575ms step_avg:42.99ms
+[2025-09-05 17:48:22] [Rank 0] step:6081/10000 train_time:261312ms step_avg:42.97ms
+[2025-09-05 17:48:23] [Rank 0] step:6101/10000 train_time:262049ms step_avg:42.95ms
+[2025-09-05 17:48:24] [Rank 0] step:6121/10000 train_time:262787ms step_avg:42.93ms
+[2025-09-05 17:48:24] [Rank 0] step:6141/10000 train_time:263524ms step_avg:42.91ms
+[2025-09-05 17:48:25] [Rank 0] step:6161/10000 train_time:264262ms step_avg:42.89ms
+[2025-09-05 17:48:26] [Rank 0] step:6181/10000 train_time:265000ms step_avg:42.87ms
+[2025-09-05 17:48:27] [Rank 0] step:6201/10000 train_time:265738ms step_avg:42.85ms
+[2025-09-05 17:48:27] [Rank 0] step:6221/10000 train_time:266475ms step_avg:42.83ms
+[2025-09-05 17:48:28] [Rank 0] step:6241/10000 train_time:267214ms step_avg:42.82ms
+[2025-09-05 17:48:29] [Rank 0] step:6261/10000 train_time:267952ms step_avg:42.80ms
+[2025-09-05 17:48:29] [Rank 0] step:6281/10000 train_time:268690ms step_avg:42.78ms
+[2025-09-05 17:48:30] [Rank 0] step:6301/10000 train_time:269428ms step_avg:42.76ms
+[2025-09-05 17:48:31] [Rank 0] step:6321/10000 train_time:270167ms step_avg:42.74ms
+[2025-09-05 17:48:32] [Rank 0] step:6341/10000 train_time:270904ms step_avg:42.72ms
+[2025-09-05 17:48:32] [Rank 0] step:6361/10000 train_time:271643ms step_avg:42.70ms
+[2025-09-05 17:48:33] [Rank 0] step:6381/10000 train_time:272381ms step_avg:42.69ms
+[2025-09-05 17:48:34] [Rank 0] step:6401/10000 train_time:273118ms step_avg:42.67ms
+[2025-09-05 17:48:35] [Rank 0] step:6421/10000 train_time:273856ms step_avg:42.65ms
+[2025-09-05 17:48:35] [Rank 0] step:6441/10000 train_time:274594ms step_avg:42.63ms
+[2025-09-05 17:48:36] [Rank 0] step:6461/10000 train_time:275332ms step_avg:42.61ms
+[2025-09-05 17:48:37] [Rank 0] step:6481/10000 train_time:276070ms step_avg:42.60ms
+[2025-09-05 17:48:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:48:38] [Rank 0] PRINT: step:6500/10000 train_loss:1.6997 val_loss:1.6841 train_time:276889ms step_avg:42.60ms
+[2025-09-05 17:48:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:48:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:50:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:50:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:50:00] [Rank 0] Total Loss: 4.3071
+[2025-09-05 17:50:00] [Rank 0] Total FTA (Unweighted): 0.4419
+[2025-09-05 17:50:00] [Rank 0] Total FTA (Weighted): 0.4419
+[2025-09-05 17:50:00] [Rank 0] Group 0 Loss: 3.3392
+[2025-09-05 17:50:00] [Rank 0] Group 1 Loss: 3.1601
+[2025-09-05 17:50:00] [Rank 0] Group 2 Loss: 3.1969
+[2025-09-05 17:50:00] [Rank 0] Group 3 Loss: 3.4863
+[2025-09-05 17:50:00] [Rank 0] Group 4 Loss: 3.7529
+[2025-09-05 17:50:00] [Rank 0] Group 5 Loss: 3.9869
+[2025-09-05 17:50:00] [Rank 0] Group 6 Loss: 4.2268
+[2025-09-05 17:50:00] [Rank 0] Group 7 Loss: 4.3896
+[2025-09-05 17:50:00] [Rank 0] Group 8 Loss: 4.7147
+[2025-09-05 17:50:00] [Rank 0] Group 9 Loss: 4.8437
+[2025-09-05 17:50:00] [Rank 0] Group 10 Loss: 4.9797
+[2025-09-05 17:50:00] [Rank 0] Group 11 Loss: 5.0049
+[2025-09-05 17:50:00] [Rank 0] Group 12 Loss: 4.9198
+[2025-09-05 17:50:00] [Rank 0] Group 13 Loss: 4.9493
+[2025-09-05 17:50:00] [Rank 0] Group 14 Loss: 5.0118
+[2025-09-05 17:50:00] [Rank 0] Group 15 Loss: 4.9513
+[2025-09-05 17:50:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:50:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:50:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:50:00] [Rank 0] Group 3 FTA: 0.6200
+[2025-09-05 17:50:00] [Rank 0] Group 4 FTA: 0.3900
+[2025-09-05 17:50:00] [Rank 0] Group 5 FTA: 0.4900
+[2025-09-05 17:50:00] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 17:50:00] [Rank 0] Group 7 FTA: 0.3200
+[2025-09-05 17:50:00] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 17:50:00] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 17:50:00] [Rank 0] Group 10 FTA: 0.3600
+[2025-09-05 17:50:00] [Rank 0] Group 11 FTA: 0.3000
+[2025-09-05 17:50:00] [Rank 0] Group 12 FTA: 0.2300
+[2025-09-05 17:50:00] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 17:50:00] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 17:50:00] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 17:50:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:50:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:50:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:50:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:50:02] [Rank 0] step:6501/10000 train_time:276899ms step_avg:42.59ms
+[2025-09-05 17:50:02] [Rank 0] step:6521/10000 train_time:277578ms step_avg:42.57ms
+[2025-09-05 17:50:03] [Rank 0] step:6541/10000 train_time:278317ms step_avg:42.55ms
+[2025-09-05 17:50:04] [Rank 0] step:6561/10000 train_time:279056ms step_avg:42.53ms
+[2025-09-05 17:50:05] [Rank 0] step:6581/10000 train_time:279795ms step_avg:42.52ms
+[2025-09-05 17:50:05] [Rank 0] step:6601/10000 train_time:280534ms step_avg:42.50ms
+[2025-09-05 17:50:06] [Rank 0] step:6621/10000 train_time:281272ms step_avg:42.48ms
+[2025-09-05 17:50:07] [Rank 0] step:6641/10000 train_time:282010ms step_avg:42.47ms
+[2025-09-05 17:50:08] [Rank 0] step:6661/10000 train_time:282749ms step_avg:42.45ms
+[2025-09-05 17:50:08] [Rank 0] step:6681/10000 train_time:283488ms step_avg:42.43ms
+[2025-09-05 17:50:09] [Rank 0] step:6701/10000 train_time:284225ms step_avg:42.42ms
+[2025-09-05 17:50:10] [Rank 0] step:6721/10000 train_time:284964ms step_avg:42.40ms
+[2025-09-05 17:50:11] [Rank 0] step:6741/10000 train_time:285703ms step_avg:42.38ms
+[2025-09-05 17:50:11] [Rank 0] step:6761/10000 train_time:286442ms step_avg:42.37ms
+[2025-09-05 17:50:12] [Rank 0] step:6781/10000 train_time:287180ms step_avg:42.35ms
+[2025-09-05 17:50:13] [Rank 0] step:6801/10000 train_time:287923ms step_avg:42.34ms
+[2025-09-05 17:50:13] [Rank 0] step:6821/10000 train_time:288661ms step_avg:42.32ms
+[2025-09-05 17:50:14] [Rank 0] step:6841/10000 train_time:289596ms step_avg:42.33ms
+[2025-09-05 17:50:15] [Rank 0] step:6861/10000 train_time:290334ms step_avg:42.32ms
+[2025-09-05 17:50:16] [Rank 0] step:6881/10000 train_time:291073ms step_avg:42.30ms
+[2025-09-05 17:50:17] [Rank 0] step:6901/10000 train_time:291811ms step_avg:42.29ms
+[2025-09-05 17:50:17] [Rank 0] step:6921/10000 train_time:292551ms step_avg:42.27ms
+[2025-09-05 17:50:18] [Rank 0] step:6941/10000 train_time:293289ms step_avg:42.25ms
+[2025-09-05 17:50:19] [Rank 0] step:6961/10000 train_time:294027ms step_avg:42.24ms
+[2025-09-05 17:50:20] [Rank 0] step:6981/10000 train_time:294766ms step_avg:42.22ms
+[2025-09-05 17:50:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:50:21] [Rank 0] PRINT: step:7000/10000 train_loss:1.6857 val_loss:1.6720 train_time:295585ms step_avg:42.23ms
+[2025-09-05 17:50:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:50:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:51:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:51:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:51:43] [Rank 0] Total Loss: 4.3210
+[2025-09-05 17:51:43] [Rank 0] Total FTA (Unweighted): 0.4525
+[2025-09-05 17:51:43] [Rank 0] Total FTA (Weighted): 0.4525
+[2025-09-05 17:51:43] [Rank 0] Group 0 Loss: 3.4104
+[2025-09-05 17:51:43] [Rank 0] Group 1 Loss: 3.1639
+[2025-09-05 17:51:43] [Rank 0] Group 2 Loss: 3.2781
+[2025-09-05 17:51:43] [Rank 0] Group 3 Loss: 3.5097
+[2025-09-05 17:51:43] [Rank 0] Group 4 Loss: 3.7752
+[2025-09-05 17:51:43] [Rank 0] Group 5 Loss: 4.0152
+[2025-09-05 17:51:43] [Rank 0] Group 6 Loss: 4.2036
+[2025-09-05 17:51:43] [Rank 0] Group 7 Loss: 4.4066
+[2025-09-05 17:51:43] [Rank 0] Group 8 Loss: 4.7101
+[2025-09-05 17:51:43] [Rank 0] Group 9 Loss: 4.8641
+[2025-09-05 17:51:43] [Rank 0] Group 10 Loss: 4.9582
+[2025-09-05 17:51:43] [Rank 0] Group 11 Loss: 4.9981
+[2025-09-05 17:51:43] [Rank 0] Group 12 Loss: 4.9300
+[2025-09-05 17:51:43] [Rank 0] Group 13 Loss: 4.9512
+[2025-09-05 17:51:43] [Rank 0] Group 14 Loss: 5.0203
+[2025-09-05 17:51:43] [Rank 0] Group 15 Loss: 4.9409
+[2025-09-05 17:51:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:51:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:51:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:51:43] [Rank 0] Group 3 FTA: 0.6800
+[2025-09-05 17:51:43] [Rank 0] Group 4 FTA: 0.4100
+[2025-09-05 17:51:43] [Rank 0] Group 5 FTA: 0.4900
+[2025-09-05 17:51:43] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 17:51:43] [Rank 0] Group 7 FTA: 0.3300
+[2025-09-05 17:51:43] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 17:51:43] [Rank 0] Group 9 FTA: 0.2600
+[2025-09-05 17:51:43] [Rank 0] Group 10 FTA: 0.3600
+[2025-09-05 17:51:43] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 17:51:43] [Rank 0] Group 12 FTA: 0.2500
+[2025-09-05 17:51:43] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 17:51:43] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 17:51:43] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:51:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:51:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:51:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:51:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:51:44] [Rank 0] step:7001/10000 train_time:295595ms step_avg:42.22ms
+[2025-09-05 17:51:45] [Rank 0] step:7021/10000 train_time:296274ms step_avg:42.20ms
+[2025-09-05 17:51:46] [Rank 0] step:7041/10000 train_time:297013ms step_avg:42.18ms
+[2025-09-05 17:51:47] [Rank 0] step:7061/10000 train_time:297751ms step_avg:42.17ms
+[2025-09-05 17:51:47] [Rank 0] step:7081/10000 train_time:298488ms step_avg:42.15ms
+[2025-09-05 17:51:48] [Rank 0] step:7101/10000 train_time:299226ms step_avg:42.14ms
+[2025-09-05 17:51:49] [Rank 0] step:7121/10000 train_time:299964ms step_avg:42.12ms
+[2025-09-05 17:51:50] [Rank 0] step:7141/10000 train_time:300702ms step_avg:42.11ms
+[2025-09-05 17:51:50] [Rank 0] step:7161/10000 train_time:301441ms step_avg:42.09ms
+[2025-09-05 17:51:51] [Rank 0] step:7181/10000 train_time:302179ms step_avg:42.08ms
+[2025-09-05 17:51:52] [Rank 0] step:7201/10000 train_time:302917ms step_avg:42.07ms
+[2025-09-05 17:51:52] [Rank 0] step:7221/10000 train_time:303655ms step_avg:42.05ms
+[2025-09-05 17:51:53] [Rank 0] step:7241/10000 train_time:304393ms step_avg:42.04ms
+[2025-09-05 17:51:54] [Rank 0] step:7261/10000 train_time:305131ms step_avg:42.02ms
+[2025-09-05 17:51:55] [Rank 0] step:7281/10000 train_time:305869ms step_avg:42.01ms
+[2025-09-05 17:51:55] [Rank 0] step:7301/10000 train_time:306608ms step_avg:42.00ms
+[2025-09-05 17:51:56] [Rank 0] step:7321/10000 train_time:307346ms step_avg:41.98ms
+[2025-09-05 17:51:57] [Rank 0] step:7341/10000 train_time:308083ms step_avg:41.97ms
+[2025-09-05 17:51:58] [Rank 0] step:7361/10000 train_time:308821ms step_avg:41.95ms
+[2025-09-05 17:51:58] [Rank 0] step:7381/10000 train_time:309559ms step_avg:41.94ms
+[2025-09-05 17:51:59] [Rank 0] step:7401/10000 train_time:310296ms step_avg:41.93ms
+[2025-09-05 17:52:00] [Rank 0] step:7421/10000 train_time:311035ms step_avg:41.91ms
+[2025-09-05 17:52:01] [Rank 0] step:7441/10000 train_time:311772ms step_avg:41.90ms
+[2025-09-05 17:52:01] [Rank 0] step:7461/10000 train_time:312509ms step_avg:41.89ms
+[2025-09-05 17:52:02] [Rank 0] step:7481/10000 train_time:313248ms step_avg:41.87ms
+[2025-09-05 17:52:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:52:03] [Rank 0] PRINT: step:7500/10000 train_loss:1.6736 val_loss:1.6604 train_time:314067ms step_avg:41.88ms
+[2025-09-05 17:52:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:52:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:53:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:53:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:53:25] [Rank 0] Total Loss: 4.3237
+[2025-09-05 17:53:25] [Rank 0] Total FTA (Unweighted): 0.4731
+[2025-09-05 17:53:25] [Rank 0] Total FTA (Weighted): 0.4731
+[2025-09-05 17:53:25] [Rank 0] Group 0 Loss: 3.3913
+[2025-09-05 17:53:25] [Rank 0] Group 1 Loss: 3.2107
+[2025-09-05 17:53:25] [Rank 0] Group 2 Loss: 3.2473
+[2025-09-05 17:53:25] [Rank 0] Group 3 Loss: 3.5372
+[2025-09-05 17:53:25] [Rank 0] Group 4 Loss: 3.7877
+[2025-09-05 17:53:25] [Rank 0] Group 5 Loss: 4.0173
+[2025-09-05 17:53:25] [Rank 0] Group 6 Loss: 4.2334
+[2025-09-05 17:53:25] [Rank 0] Group 7 Loss: 4.3967
+[2025-09-05 17:53:25] [Rank 0] Group 8 Loss: 4.7255
+[2025-09-05 17:53:25] [Rank 0] Group 9 Loss: 4.8576
+[2025-09-05 17:53:25] [Rank 0] Group 10 Loss: 4.9561
+[2025-09-05 17:53:25] [Rank 0] Group 11 Loss: 4.9887
+[2025-09-05 17:53:25] [Rank 0] Group 12 Loss: 4.9280
+[2025-09-05 17:53:25] [Rank 0] Group 13 Loss: 4.9705
+[2025-09-05 17:53:25] [Rank 0] Group 14 Loss: 4.9940
+[2025-09-05 17:53:25] [Rank 0] Group 15 Loss: 4.9373
+[2025-09-05 17:53:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:53:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:53:25] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:53:25] [Rank 0] Group 3 FTA: 0.8300
+[2025-09-05 17:53:25] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 17:53:25] [Rank 0] Group 5 FTA: 0.5100
+[2025-09-05 17:53:25] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 17:53:25] [Rank 0] Group 7 FTA: 0.3300
+[2025-09-05 17:53:25] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 17:53:25] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 17:53:25] [Rank 0] Group 10 FTA: 0.3900
+[2025-09-05 17:53:25] [Rank 0] Group 11 FTA: 0.3000
+[2025-09-05 17:53:25] [Rank 0] Group 12 FTA: 0.2800
+[2025-09-05 17:53:25] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 17:53:25] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:53:25] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:53:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:53:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:53:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:53:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:53:26] [Rank 0] step:7501/10000 train_time:314077ms step_avg:41.87ms
+[2025-09-05 17:53:27] [Rank 0] step:7521/10000 train_time:314746ms step_avg:41.85ms
+[2025-09-05 17:53:28] [Rank 0] step:7541/10000 train_time:315483ms step_avg:41.84ms
+[2025-09-05 17:53:28] [Rank 0] step:7561/10000 train_time:316221ms step_avg:41.82ms
+[2025-09-05 17:53:29] [Rank 0] step:7581/10000 train_time:316959ms step_avg:41.81ms
+[2025-09-05 17:53:30] [Rank 0] step:7601/10000 train_time:317697ms step_avg:41.80ms
+[2025-09-05 17:53:31] [Rank 0] step:7621/10000 train_time:318435ms step_avg:41.78ms
+[2025-09-05 17:53:32] [Rank 0] step:7641/10000 train_time:319400ms step_avg:41.80ms
+[2025-09-05 17:53:33] [Rank 0] step:7661/10000 train_time:320518ms step_avg:41.84ms
+[2025-09-05 17:53:33] [Rank 0] step:7681/10000 train_time:321257ms step_avg:41.82ms
+[2025-09-05 17:53:34] [Rank 0] step:7701/10000 train_time:321995ms step_avg:41.81ms
+[2025-09-05 17:53:35] [Rank 0] step:7721/10000 train_time:322733ms step_avg:41.80ms
+[2025-09-05 17:53:36] [Rank 0] step:7741/10000 train_time:323471ms step_avg:41.79ms
+[2025-09-05 17:53:36] [Rank 0] step:7761/10000 train_time:324209ms step_avg:41.77ms
+[2025-09-05 17:53:37] [Rank 0] step:7781/10000 train_time:324947ms step_avg:41.76ms
+[2025-09-05 17:53:38] [Rank 0] step:7801/10000 train_time:325685ms step_avg:41.75ms
+[2025-09-05 17:53:39] [Rank 0] step:7821/10000 train_time:326425ms step_avg:41.74ms
+[2025-09-05 17:53:39] [Rank 0] step:7841/10000 train_time:327164ms step_avg:41.72ms
+[2025-09-05 17:53:40] [Rank 0] step:7861/10000 train_time:327902ms step_avg:41.71ms
+[2025-09-05 17:53:41] [Rank 0] step:7881/10000 train_time:328639ms step_avg:41.70ms
+[2025-09-05 17:53:41] [Rank 0] step:7901/10000 train_time:329377ms step_avg:41.69ms
+[2025-09-05 17:53:42] [Rank 0] step:7921/10000 train_time:330115ms step_avg:41.68ms
+[2025-09-05 17:53:43] [Rank 0] step:7941/10000 train_time:330853ms step_avg:41.66ms
+[2025-09-05 17:53:44] [Rank 0] step:7961/10000 train_time:331591ms step_avg:41.65ms
+[2025-09-05 17:53:44] [Rank 0] step:7981/10000 train_time:332329ms step_avg:41.64ms
+[2025-09-05 17:53:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:53:46] [Rank 0] PRINT: step:8000/10000 train_loss:1.6638 val_loss:1.6517 train_time:333148ms step_avg:41.64ms +[2025-09-05 17:53:46] [Rank 0] PRINT: step:8000/10000 train_loss:1.6638 val_loss:1.6517 train_time:333148ms step_avg:41.64ms +[2025-09-05 17:53:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:53:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:53:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:53:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:55:07] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:55:07] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:55:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:55:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:55:07] [Rank 0] Total Loss: 4.3515 +[2025-09-05 17:55:07] [Rank 0] Total Loss: 4.3515 +[2025-09-05 17:55:07] [Rank 0] Total FTA (Unweighted): 0.4713 +[2025-09-05 17:55:07] [Rank 0] Total FTA (Unweighted): 0.4713 +[2025-09-05 17:55:07] [Rank 0] Total FTA (Weighted): 0.4713 +[2025-09-05 17:55:07] [Rank 0] Total FTA (Weighted): 0.4713 +[2025-09-05 17:55:07] [Rank 0] Group 0 Loss: 3.4897 +[2025-09-05 17:55:07] [Rank 0] Group 0 Loss: 3.4897 +[2025-09-05 17:55:07] [Rank 0] Group 1 Loss: 3.2407 +[2025-09-05 17:55:07] [Rank 0] Group 1 Loss: 3.2407 +[2025-09-05 17:55:07] [Rank 0] Group 2 Loss: 3.3023 +[2025-09-05 17:55:07] [Rank 0] Group 2 Loss: 3.3023 +[2025-09-05 17:55:07] [Rank 0] Group 3 Loss: 3.5171 +[2025-09-05 17:55:07] [Rank 0] Group 3 Loss: 3.5171 +[2025-09-05 17:55:07] [Rank 0] Group 4 Loss: 3.8321 +[2025-09-05 17:55:07] [Rank 0] Group 4 Loss: 3.8321 +[2025-09-05 17:55:07] [Rank 0] Group 5 Loss: 4.0436 +[2025-09-05 17:55:07] [Rank 0] Group 5 Loss: 4.0436 +[2025-09-05 17:55:07] [Rank 0] Group 6 Loss: 4.2254 +[2025-09-05 17:55:07] [Rank 0] Group 6 Loss: 4.2254 +[2025-09-05 17:55:07] [Rank 0] Group 7 Loss: 4.4264 +[2025-09-05 17:55:07] [Rank 0] Group 7 Loss: 4.4264 +[2025-09-05 17:55:07] [Rank 0] Group 8 Loss: 4.7463 +[2025-09-05 17:55:07] [Rank 0] Group 8 Loss: 4.7463 +[2025-09-05 17:55:07] [Rank 0] Group 9 Loss: 4.8639 +[2025-09-05 17:55:07] [Rank 0] Group 9 Loss: 4.8639 +[2025-09-05 17:55:07] [Rank 0] Group 10 Loss: 4.9943 +[2025-09-05 17:55:07] [Rank 0] Group 10 Loss: 4.9943 +[2025-09-05 17:55:07] [Rank 0] Group 11 Loss: 5.0091 +[2025-09-05 17:55:07] [Rank 0] Group 11 Loss: 5.0091 +[2025-09-05 17:55:07] [Rank 0] Group 12 Loss: 4.9521 +[2025-09-05 17:55:07] [Rank 0] Group 12 Loss: 4.9521 +[2025-09-05 17:55:07] [Rank 0] Group 13 Loss: 4.9872 +[2025-09-05 17:55:07] [Rank 0] Group 13 Loss: 4.9872 +[2025-09-05 17:55:07] [Rank 0] Group 14 Loss: 5.0282 +[2025-09-05 17:55:07] [Rank 0] Group 14 Loss: 5.0282 +[2025-09-05 17:55:07] [Rank 0] Group 15 Loss: 4.9650 +[2025-09-05 17:55:07] [Rank 0] Group 15 Loss: 4.9650 +[2025-09-05 17:55:07] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:55:07] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:55:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:55:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:55:07] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:55:07] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:55:07] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 17:55:07] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 17:55:07] [Rank 0] Group 4 FTA: 0.4500 +[2025-09-05 17:55:07] [Rank 0] Group 4 FTA: 0.4500 +[2025-09-05 17:55:07] [Rank 0] Group 5 FTA: 0.4900 +[2025-09-05 17:55:07] [Rank 0] Group 5 FTA: 
0.4900
+[2025-09-05 17:55:07] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 17:55:07] [Rank 0] Group 7 FTA: 0.3400
+[2025-09-05 17:55:07] [Rank 0] Group 8 FTA: 0.3700
+[2025-09-05 17:55:07] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 17:55:07] [Rank 0] Group 10 FTA: 0.3900
+[2025-09-05 17:55:07] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 17:55:07] [Rank 0] Group 12 FTA: 0.2900
+[2025-09-05 17:55:07] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-05 17:55:07] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:55:07] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 17:55:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:55:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:55:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:55:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:55:09] [Rank 0] step:8001/10000 train_time:333158ms step_avg:41.64ms
+[2025-09-05 17:55:10] [Rank 0] step:8021/10000 train_time:334423ms step_avg:41.69ms
+[2025-09-05 17:55:11] [Rank 0] step:8041/10000 train_time:335160ms step_avg:41.68ms
+[2025-09-05 17:55:12] [Rank 0] step:8061/10000 train_time:335898ms step_avg:41.67ms
+[2025-09-05 17:55:12] [Rank 0] step:8081/10000 train_time:336636ms step_avg:41.66ms
+[2025-09-05 17:55:13] [Rank 0] step:8101/10000 train_time:337489ms step_avg:41.66ms
+[2025-09-05 17:55:14] [Rank 0] step:8121/10000 train_time:338227ms step_avg:41.65ms
+[2025-09-05 17:55:15] [Rank 0] step:8141/10000 train_time:338965ms step_avg:41.64ms
+[2025-09-05 17:55:16] [Rank 0] step:8161/10000 train_time:339844ms step_avg:41.64ms
+[2025-09-05 17:55:16] [Rank 0] step:8181/10000 train_time:340582ms step_avg:41.63ms
+[2025-09-05 17:55:17] [Rank 0] step:8201/10000 train_time:341319ms step_avg:41.62ms
+[2025-09-05 17:55:18] [Rank 0] step:8221/10000 train_time:342058ms step_avg:41.61ms
+[2025-09-05 17:55:19] [Rank 0] step:8241/10000 train_time:342796ms step_avg:41.60ms
+[2025-09-05 17:55:19] [Rank 0] step:8261/10000 train_time:343533ms step_avg:41.58ms
+[2025-09-05 17:55:20] [Rank 0] step:8281/10000 train_time:344271ms step_avg:41.57ms
+[2025-09-05 17:55:21] [Rank 0] step:8301/10000 train_time:345010ms step_avg:41.56ms
+[2025-09-05 17:55:22] [Rank 0] step:8321/10000 train_time:345748ms step_avg:41.55ms
+[2025-09-05 17:55:22] [Rank 0] step:8341/10000 train_time:346487ms step_avg:41.54ms
+[2025-09-05 17:55:23] [Rank 0] step:8361/10000 train_time:347224ms step_avg:41.53ms
+[2025-09-05 17:55:24] [Rank 0] step:8381/10000 train_time:347963ms step_avg:41.52ms
+[2025-09-05 17:55:25] [Rank 0] step:8401/10000 train_time:348702ms step_avg:41.51ms
+[2025-09-05 17:55:25] [Rank 0] step:8421/10000 train_time:349440ms step_avg:41.50ms
+[2025-09-05 17:55:26] [Rank 0] step:8441/10000 train_time:350177ms step_avg:41.49ms
+[2025-09-05 17:55:27] [Rank 0] step:8461/10000 train_time:350916ms step_avg:41.47ms
+[2025-09-05 17:55:27] [Rank 0] step:8481/10000 train_time:351656ms step_avg:41.46ms
+[2025-09-05 17:55:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
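The warning above is plain integer arithmetic: the validation loop can only run whole batches, so the remainder of val_tokens is dropped. A minimal sketch of the computation (the variable names are illustrative, not the script's own):

    val_tokens = 491520       # values taken from the warning above
    val_batch_size = 65536
    full_batches = val_tokens // val_batch_size      # 7
    missed = val_tokens - full_batches * val_batch_size
    print(full_batches, missed)                      # 7 32768 -> up to 32768 tokens skipped per val pass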
+[2025-09-05 17:55:29] [Rank 0] PRINT: step:8500/10000 train_loss:1.6547 val_loss:1.6415 train_time:352475ms step_avg:41.47ms
+[2025-09-05 17:55:29] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:55:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:56:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:56:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:56:50] [Rank 0] Total Loss: 4.4298
+[2025-09-05 17:56:50] [Rank 0] Total FTA (Unweighted): 0.4719
+[2025-09-05 17:56:50] [Rank 0] Total FTA (Weighted): 0.4719
+[2025-09-05 17:56:50] [Rank 0] Group 0 Loss: 3.4778
+[2025-09-05 17:56:50] [Rank 0] Group 1 Loss: 3.2926
+[2025-09-05 17:56:50] [Rank 0] Group 2 Loss: 3.4831
+[2025-09-05 17:56:50] [Rank 0] Group 3 Loss: 3.5815
+[2025-09-05 17:56:50] [Rank 0] Group 4 Loss: 3.9015
+[2025-09-05 17:56:50] [Rank 0] Group 5 Loss: 4.1307
+[2025-09-05 17:56:50] [Rank 0] Group 6 Loss: 4.3335
+[2025-09-05 17:56:50] [Rank 0] Group 7 Loss: 4.5150
+[2025-09-05 17:56:50] [Rank 0] Group 8 Loss: 4.8467
+[2025-09-05 17:56:50] [Rank 0] Group 9 Loss: 4.9243
+[2025-09-05 17:56:50] [Rank 0] Group 10 Loss: 5.0655
+[2025-09-05 17:56:50] [Rank 0] Group 11 Loss: 5.1160
+[2025-09-05 17:56:50] [Rank 0] Group 12 Loss: 5.0299
+[2025-09-05 17:56:50] [Rank 0] Group 13 Loss: 5.0547
+[2025-09-05 17:56:50] [Rank 0] Group 14 Loss: 5.0867
+[2025-09-05 17:56:50] [Rank 0] Group 15 Loss: 5.0381
+[2025-09-05 17:56:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:56:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:56:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:56:51] [Rank 0] Group 3 FTA: 0.7300
+[2025-09-05 17:56:51] [Rank 0] Group 4 FTA: 0.4500
+[2025-09-05 17:56:51] [Rank 0] Group 5 FTA: 0.4900
+[2025-09-05 17:56:51] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 17:56:51] [Rank 0] Group 7 FTA: 0.3500
+[2025-09-05 17:56:51] [Rank 0] Group 8 FTA: 0.3800
+[2025-09-05 17:56:51] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 17:56:51] [Rank 0] Group 10 FTA: 0.3800
+[2025-09-05 17:56:51] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 17:56:51] [Rank 0] Group 12 FTA: 0.3000
+[2025-09-05 17:56:51] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 17:56:51] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:56:51] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 17:56:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:56:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:56:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:56:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:56:52] [Rank 0] step:8501/10000 train_time:352485ms step_avg:41.46ms
+[2025-09-05 17:56:53] [Rank 0] step:8521/10000 train_time:353149ms step_avg:41.44ms
+[2025-09-05 17:56:53] [Rank 0] step:8541/10000 train_time:353887ms step_avg:41.43ms
+[2025-09-05 17:56:54] [Rank 0] step:8561/10000 train_time:354624ms step_avg:41.42ms
+[2025-09-05 17:56:55] [Rank 0] step:8581/10000 train_time:355363ms step_avg:41.41ms
+[2025-09-05 17:56:56] [Rank 0] step:8601/10000 train_time:356100ms step_avg:41.40ms
+[2025-09-05 17:56:56] [Rank 0] step:8621/10000 train_time:356838ms step_avg:41.39ms
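One detail worth noting in the step-8500 block above: the unweighted and weighted FTA totals coincide (0.4719). That is expected whenever every group contributes the same number of fixed-eval samples (per_group_k=100 in the parsed CLI args, 16 groups, hence the 1600 samples reported at load time), since a size-weighted mean over equal-sized groups reduces to the plain mean. A quick check with the logged values:

    group_fta = [1.0000, 1.0000, 1.0000, 0.7300, 0.4500, 0.4900, 0.4000, 0.3500,
                 0.3800, 0.2700, 0.3800, 0.3300, 0.3000, 0.2100, 0.1500, 0.1100]
    sizes = [100] * 16                               # per_group_k from the config
    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * n for f, n in zip(group_fta, sizes)) / sum(sizes)
    print(round(unweighted, 4), round(weighted, 4))  # 0.4719 0.4719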
+[2025-09-05 17:56:57] [Rank 0] step:8641/10000 train_time:357576ms step_avg:41.38ms
+[2025-09-05 17:56:58] [Rank 0] step:8661/10000 train_time:358315ms step_avg:41.37ms
+[2025-09-05 17:56:59] [Rank 0] step:8681/10000 train_time:359053ms step_avg:41.36ms
+[2025-09-05 17:56:59] [Rank 0] step:8701/10000 train_time:359790ms step_avg:41.35ms
+[2025-09-05 17:57:00] [Rank 0] step:8721/10000 train_time:360528ms step_avg:41.34ms
+[2025-09-05 17:57:01] [Rank 0] step:8741/10000 train_time:361266ms step_avg:41.33ms
+[2025-09-05 17:57:02] [Rank 0] step:8761/10000 train_time:362004ms step_avg:41.32ms
+[2025-09-05 17:57:02] [Rank 0] step:8781/10000 train_time:362742ms step_avg:41.31ms
+[2025-09-05 17:57:03] [Rank 0] step:8801/10000 train_time:363480ms step_avg:41.30ms
+[2025-09-05 17:57:04] [Rank 0] step:8821/10000 train_time:364218ms step_avg:41.29ms
+[2025-09-05 17:57:05] [Rank 0] step:8841/10000 train_time:365558ms step_avg:41.35ms
+[2025-09-05 17:57:06] [Rank 0] step:8861/10000 train_time:366296ms step_avg:41.34ms
+[2025-09-05 17:57:07] [Rank 0] step:8881/10000 train_time:367034ms step_avg:41.33ms
+[2025-09-05 17:57:07] [Rank 0] step:8901/10000 train_time:367772ms step_avg:41.32ms
+[2025-09-05 17:57:08] [Rank 0] step:8921/10000 train_time:368509ms step_avg:41.31ms
+[2025-09-05 17:57:09] [Rank 0] step:8941/10000 train_time:369247ms step_avg:41.30ms
+[2025-09-05 17:57:10] [Rank 0] step:8961/10000 train_time:369984ms step_avg:41.29ms
+[2025-09-05 17:57:10] [Rank 0] step:8981/10000 train_time:370721ms step_avg:41.28ms
+[2025-09-05 17:57:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
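The step_avg field is consistent with cumulative train_time divided by the current step index; for example, for the step-8981 entry above:

    train_time_ms = 370721   # copied from the log line above
    step = 8981
    print(round(train_time_ms / step, 2))   # 41.28, matching step_avg:41.28ms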
+[2025-09-05 17:57:12] [Rank 0] PRINT: step:9000/10000 train_loss:1.6456 val_loss:1.6342 train_time:371539ms step_avg:41.28ms
+[2025-09-05 17:57:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:57:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:58:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:58:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:58:33] [Rank 0] Total Loss: 4.3731
+[2025-09-05 17:58:33] [Rank 0] Total FTA (Unweighted): 0.4900
+[2025-09-05 17:58:33] [Rank 0] Total FTA (Weighted): 0.4900
+[2025-09-05 17:58:33] [Rank 0] Group 0 Loss: 3.4730
+[2025-09-05 17:58:33] [Rank 0] Group 1 Loss: 3.2292
+[2025-09-05 17:58:33] [Rank 0] Group 2 Loss: 3.3526
+[2025-09-05 17:58:33] [Rank 0] Group 3 Loss: 3.5277
+[2025-09-05 17:58:33] [Rank 0] Group 4 Loss: 3.8329
+[2025-09-05 17:58:33] [Rank 0] Group 5 Loss: 4.0836
+[2025-09-05 17:58:33] [Rank 0] Group 6 Loss: 4.2610
+[2025-09-05 17:58:33] [Rank 0] Group 7 Loss: 4.4632
+[2025-09-05 17:58:33] [Rank 0] Group 8 Loss: 4.7849
+[2025-09-05 17:58:33] [Rank 0] Group 9 Loss: 4.8882
+[2025-09-05 17:58:34] [Rank 0] Group 10 Loss: 5.0125
+[2025-09-05 17:58:34] [Rank 0] Group 11 Loss: 5.0671
+[2025-09-05 17:58:34] [Rank 0] Group 12 Loss: 4.9740
+[2025-09-05 17:58:34] [Rank 0] Group 13 Loss: 4.9990
+[2025-09-05 17:58:34] [Rank 0] Group 14 Loss: 5.0449
+[2025-09-05 17:58:34] [Rank 0] Group 15 Loss: 4.9766
+[2025-09-05 17:58:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:58:34] [Rank 0] Group 3 FTA: 0.8800
+[2025-09-05 17:58:34] [Rank 0] Group 4 FTA: 0.4600
+[2025-09-05 17:58:34] [Rank 0] Group 5 FTA: 0.5100
+[2025-09-05 17:58:34] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 17:58:34] [Rank 0] Group 7 FTA: 0.3600
+[2025-09-05 17:58:34] [Rank 0] Group 8 FTA: 0.3700
+[2025-09-05 17:58:34] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 17:58:34] [Rank 0] Group 10 FTA: 0.3900
+[2025-09-05 17:58:34] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 17:58:34] [Rank 0] Group 12 FTA: 0.3200
+[2025-09-05 17:58:34] [Rank 0] Group 13 FTA: 0.2600
+[2025-09-05 17:58:34] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 17:58:34] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 17:58:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 17:58:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 17:58:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 17:58:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 17:58:36] [Rank 0] step:9001/10000 train_time:371550ms step_avg:41.28ms
+[2025-09-05 17:58:37] [Rank 0] step:9021/10000 train_time:372225ms step_avg:41.26ms
+[2025-09-05 17:58:37] [Rank 0] step:9041/10000 train_time:372962ms step_avg:41.25ms
+[2025-09-05 17:58:38] [Rank 0] step:9061/10000 train_time:373700ms step_avg:41.24ms
+[2025-09-05 17:58:39] [Rank 0] step:9081/10000 train_time:374437ms step_avg:41.23ms
+[2025-09-05 17:58:40] [Rank 0] step:9101/10000 train_time:375174ms step_avg:41.22ms
+[2025-09-05 17:58:40] [Rank 0] step:9121/10000 train_time:375912ms step_avg:41.21ms
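The four "[✓] ... curve updated and saved" lines show that the run re-renders its PNG plots after every detailed evaluation. The plotting code itself is not included in this excerpt; a hypothetical minimal version of the per-class accuracy plot could look like this:

    import matplotlib
    matplotlib.use("Agg")                 # headless backend, as on a training node
    import matplotlib.pyplot as plt

    history = {g: [] for g in range(16)}  # group id -> [(step, fta), ...]

    def update_acc_curves(step, group_fta, out_path):
        for g, fta in enumerate(group_fta):
            history[g].append((step, fta))
        plt.figure(figsize=(10, 6))
        for g, pts in sorted(history.items()):
            xs, ys = zip(*pts)
            plt.plot(xs, ys, label=f"Group {g}")
        plt.xlabel("step"); plt.ylabel("FTA")
        plt.legend(ncol=4, fontsize=6)
        plt.savefig(out_path)
        plt.close()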
+[2025-09-05 17:58:41] [Rank 0] step:9141/10000 train_time:376649ms step_avg:41.20ms
+[2025-09-05 17:58:42] [Rank 0] step:9161/10000 train_time:377386ms step_avg:41.19ms
+[2025-09-05 17:58:42] [Rank 0] step:9181/10000 train_time:378124ms step_avg:41.19ms
+[2025-09-05 17:58:43] [Rank 0] step:9201/10000 train_time:378861ms step_avg:41.18ms
+[2025-09-05 17:58:44] [Rank 0] step:9221/10000 train_time:379598ms step_avg:41.17ms
+[2025-09-05 17:58:45] [Rank 0] step:9241/10000 train_time:380336ms step_avg:41.16ms
+[2025-09-05 17:58:45] [Rank 0] step:9261/10000 train_time:381072ms step_avg:41.15ms
+[2025-09-05 17:58:46] [Rank 0] step:9281/10000 train_time:381810ms step_avg:41.14ms
+[2025-09-05 17:58:47] [Rank 0] step:9301/10000 train_time:382548ms step_avg:41.13ms
+[2025-09-05 17:58:48] [Rank 0] step:9321/10000 train_time:383286ms step_avg:41.12ms
+[2025-09-05 17:58:48] [Rank 0] step:9341/10000 train_time:384023ms step_avg:41.11ms
+[2025-09-05 17:58:49] [Rank 0] step:9361/10000 train_time:384761ms step_avg:41.10ms
+[2025-09-05 17:58:50] [Rank 0] step:9381/10000 train_time:385498ms step_avg:41.09ms
+[2025-09-05 17:58:51] [Rank 0] step:9401/10000 train_time:386236ms step_avg:41.08ms
+[2025-09-05 17:58:51] [Rank 0] step:9421/10000 train_time:386973ms step_avg:41.08ms
+[2025-09-05 17:58:52] [Rank 0] step:9441/10000 train_time:387711ms step_avg:41.07ms
+[2025-09-05 17:58:53] [Rank 0] step:9461/10000 train_time:388448ms step_avg:41.06ms
+[2025-09-05 17:58:54] [Rank 0] step:9481/10000 train_time:389186ms step_avg:41.05ms
+[2025-09-05 17:58:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
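"Fixed-eval set loaded with 1600 samples" matches the fixed_eval_indices.json files added elsewhere in this diff: 16 group keys ("0" through "15"), each holding per_group_k=100 dataset indices. A sketch of that consistency check (the loading code itself is not shown in this excerpt; the file name is taken from the diff):

    import json

    with open("fixed_eval_indices.json") as f:
        idx = json.load(f)                    # {"0": [...], ..., "15": [...]}

    assert sorted(idx, key=int) == [str(g) for g in range(16)]
    assert all(len(v) == 100 for v in idx.values())
    print(sum(len(v) for v in idx.values()))  # 1600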
+[2025-09-05 17:58:55] [Rank 0] PRINT: step:9500/10000 train_loss:1.6378 val_loss:1.6272 train_time:390005ms step_avg:41.05ms
+[2025-09-05 17:58:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:58:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:00:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:00:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:00:16] [Rank 0] Total Loss: 4.3478
+[2025-09-05 18:00:16] [Rank 0] Total FTA (Unweighted): 0.4863
+[2025-09-05 18:00:16] [Rank 0] Total FTA (Weighted): 0.4863
+[2025-09-05 18:00:16] [Rank 0] Group 0 Loss: 3.5457
+[2025-09-05 18:00:16] [Rank 0] Group 1 Loss: 3.2662
+[2025-09-05 18:00:16] [Rank 0] Group 2 Loss: 3.3635
+[2025-09-05 18:00:16] [Rank 0] Group 3 Loss: 3.5177
+[2025-09-05 18:00:16] [Rank 0] Group 4 Loss: 3.8266
+[2025-09-05 18:00:16] [Rank 0] Group 5 Loss: 4.0409
+[2025-09-05 18:00:16] [Rank 0] Group 6 Loss: 4.2096
+[2025-09-05 18:00:16] [Rank 0] Group 7 Loss: 4.4161
+[2025-09-05 18:00:16] [Rank 0] Group 8 Loss: 4.7405
+[2025-09-05 18:00:16] [Rank 0] Group 9 Loss: 4.8435
+[2025-09-05 18:00:16] [Rank 0] Group 10 Loss: 4.9670
+[2025-09-05 18:00:16] [Rank 0] Group 11 Loss: 5.0043
+[2025-09-05 18:00:16] [Rank 0] Group 12 Loss: 4.9136
+[2025-09-05 18:00:16] [Rank 0] Group 13 Loss: 4.9657
+[2025-09-05 18:00:16] [Rank 0] Group 14 Loss: 5.0052
+[2025-09-05 18:00:16] [Rank 0] Group 15 Loss: 4.9382
+[2025-09-05 18:00:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:00:16] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:00:16] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:00:16] [Rank 0] Group 3 FTA: 0.7800
+[2025-09-05 18:00:16] [Rank 0] Group 4 FTA: 0.4600
+[2025-09-05 18:00:16] [Rank 0] Group 5 FTA: 0.5100
+[2025-09-05 18:00:16] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 18:00:16] [Rank 0] Group 7 FTA: 0.3500
+[2025-09-05 18:00:16] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 18:00:16] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 18:00:16] [Rank 0] Group 10 FTA: 0.3900
+[2025-09-05 18:00:16] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 18:00:16] [Rank 0] Group 12 FTA: 0.3400
+[2025-09-05 18:00:16] [Rank 0] Group 13 FTA: 0.2800
+[2025-09-05 18:00:16] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 18:00:16] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 18:00:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 18:00:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 18:00:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 18:00:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 18:00:18] [Rank 0] step:9501/10000 train_time:390014ms step_avg:41.05ms
+[2025-09-05 18:00:18] [Rank 0] step:9521/10000 train_time:390684ms step_avg:41.03ms
+[2025-09-05 18:00:19] [Rank 0] step:9541/10000 train_time:391422ms step_avg:41.03ms
+[2025-09-05 18:00:20] [Rank 0] step:9561/10000 train_time:392160ms step_avg:41.02ms
+[2025-09-05 18:00:21] [Rank 0] step:9581/10000 train_time:392897ms step_avg:41.01ms
+[2025-09-05 18:00:21] [Rank 0] step:9601/10000 train_time:393635ms step_avg:41.00ms
+[2025-09-05 18:00:22] [Rank 0] step:9621/10000 train_time:394373ms step_avg:40.99ms
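The totals in the step-9500 block above are consistent with unweighted means over the sixteen groups; for example, averaging the logged per-group losses reproduces the reported Total Loss:

    group_loss = [3.5457, 3.2662, 3.3635, 3.5177, 3.8266, 4.0409, 4.2096, 4.4161,
                  4.7405, 4.8435, 4.9670, 5.0043, 4.9136, 4.9657, 5.0052, 4.9382]
    print(round(sum(group_loss) / 16, 4))  # 4.3478, matching "Total Loss: 4.3478"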
+[2025-09-05 18:00:23] [Rank 0] step:9641/10000 train_time:395111ms step_avg:40.98ms
+[2025-09-05 18:00:24] [Rank 0] step:9661/10000 train_time:396123ms step_avg:41.00ms
+[2025-09-05 18:00:25] [Rank 0] step:9681/10000 train_time:396861ms step_avg:40.99ms
+[2025-09-05 18:00:25] [Rank 0] step:9701/10000 train_time:397598ms step_avg:40.99ms
+[2025-09-05 18:00:26] [Rank 0] step:9721/10000 train_time:398336ms step_avg:40.98ms
+[2025-09-05 18:00:27] [Rank 0] step:9741/10000 train_time:399076ms step_avg:40.97ms
+[2025-09-05 18:00:27] [Rank 0] step:9761/10000 train_time:399814ms step_avg:40.96ms
+[2025-09-05 18:00:28] [Rank 0] step:9781/10000 train_time:400553ms step_avg:40.95ms
+[2025-09-05 18:00:29] [Rank 0] step:9801/10000 train_time:401291ms step_avg:40.94ms
+[2025-09-05 18:00:30] [Rank 0] step:9821/10000 train_time:402154ms step_avg:40.95ms
+[2025-09-05 18:00:31] [Rank 0] step:9841/10000 train_time:402893ms step_avg:40.94ms
+[2025-09-05 18:00:31] [Rank 0] step:9861/10000 train_time:403631ms step_avg:40.93ms
+[2025-09-05 18:00:32] [Rank 0] step:9881/10000 train_time:404516ms step_avg:40.94ms
+[2025-09-05 18:00:33] [Rank 0] step:9901/10000 train_time:405255ms step_avg:40.93ms
+[2025-09-05 18:00:34] [Rank 0] step:9921/10000 train_time:405994ms step_avg:40.92ms
+[2025-09-05 18:00:34] [Rank 0] step:9941/10000 train_time:406731ms step_avg:40.91ms
+[2025-09-05 18:00:35] [Rank 0] step:9961/10000 train_time:407468ms step_avg:40.91ms
+[2025-09-05 18:00:36] [Rank 0] step:9981/10000 train_time:408206ms step_avg:40.90ms
+[2025-09-05 18:00:37] [Rank 0] step:10000/10000 train_time:408908ms step_avg:40.89ms
+[2025-09-05 18:00:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:00:37] [Rank 0] PRINT: step:10000/10000 train_loss:1.6318 val_loss:1.6212 train_time:409032ms step_avg:40.90ms
+[2025-09-05 18:00:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:00:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:01:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:01:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:01:57] [Rank 0] Total Loss: 4.3263
+[2025-09-05 18:01:57] [Rank 0] Total FTA (Unweighted): 0.4950
+[2025-09-05 18:01:57] [Rank 0] Total FTA (Weighted): 0.4950
+[2025-09-05 18:01:57] [Rank 0] Group 0 Loss: 3.5014
+[2025-09-05 18:01:57] [Rank 0] Group 1 Loss: 3.2378
+[2025-09-05 18:01:57] [Rank 0] Group 2 Loss: 3.2992
+[2025-09-05 18:01:57] [Rank 0] Group 3 Loss: 3.4971
+[2025-09-05 18:01:57] [Rank 0] Group 4 Loss: 3.8189
+[2025-09-05 18:01:57] [Rank 0] Group 5 Loss: 4.0365
+[2025-09-05 18:01:57] [Rank 0] Group 6 Loss: 4.2005
+[2025-09-05 18:01:57] [Rank 0] Group 7 Loss: 4.3920
+[2025-09-05 18:01:57] [Rank 0] Group 8 Loss: 4.7079
+[2025-09-05 18:01:57] [Rank 0] Group 9 Loss: 4.8194
+[2025-09-05 18:01:57] [Rank 0] Group 10 Loss: 4.9520
+[2025-09-05 18:01:57] [Rank 0] Group 11 Loss: 4.9809
+[2025-09-05 18:01:57] [Rank 0] Group 12 Loss: 4.9008
+[2025-09-05 18:01:57] [Rank 0] Group 13 Loss: 4.9633
+[2025-09-05 18:01:57] [Rank 0] Group 14 Loss: 4.9866
+[2025-09-05 18:01:57] [Rank 0] Group 15 Loss: 4.9258
+[2025-09-05 18:01:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:01:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:01:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:01:57] [Rank 0] Group 3 FTA: 0.8300
+[2025-09-05 18:01:57] [Rank 0] Group 4 FTA: 0.4600
+[2025-09-05 18:01:57] [Rank 0] Group 5 FTA: 0.5100
+[2025-09-05 18:01:57] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 18:01:57] [Rank 0] Group 7 FTA: 0.3500
+[2025-09-05 18:01:57] [Rank 0] Group 8 FTA: 0.3700
+[2025-09-05 18:01:57] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 18:01:57] [Rank 0] Group 10 FTA: 0.3800
+[2025-09-05 18:01:57] [Rank 0] Group 11 FTA: 0.3400
+[2025-09-05 18:01:57] [Rank 0] Group 12 FTA: 0.3700
+[2025-09-05 18:01:57] [Rank 0] Group 13 FTA: 0.3000
+[2025-09-05 18:01:57] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 18:01:57] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 18:01:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_loss_curves.png
+[2025-09-05 18:01:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/per_class_acc_curves.png
+[2025-09-05 18:01:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_loss_curve.png
+[2025-09-05 18:02:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_42/total_acc_curve.png
+[2025-09-05 18:02:00] [Rank 0] step:10001/10000 train_time:409042ms step_avg:40.90ms
+[2025-09-05 18:02:00] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 18:02:00 2025 ---
+[2025-09-05 18:02:00] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..698c211801e0532304376d1733b99e446330691f
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/config.json
@@ -0,0 +1,29 @@
+{
+ "cli_args": {
+ "unet": false,
+ "seed": 
43, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.2, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "bf0c5f1a-1ba3-47b4-8f0e-1daaa5ea4a2e", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..d7e3b35f7dc6348c5cdc9bbca69926deda159bda --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89a795306dd3c633b15ab7104f02c9382b3b8c66f70b182e7b19ed540717efe9 +size 417010 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..7f6a796a10bcdd7d5dcbdd31c2b69cc645138bb4 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c07d39fe8a8adac1fa9ef109eb33eb023f2e11fa6cbb7e08e0a85286d9a600dc +size 408246 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..8da61deeb46ffb263e43c6cb28239ef95c439609 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:826004de7f07c54f0bde46a9c1e3f152eaf26126f208bdee0461eeba87ab35ca +size 94970 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..08f7fb3caef45cfe898ed43d4de4ef4bafb9cc3d --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c1fcd36750a04211e4380c0c7f921b596c4ca52533f81b3d66b31525adf5279 +size 106517 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/training_log_bf0c5f1a-1ba3-47b4-8f0e-1daaa5ea4a2e.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/training_log_bf0c5f1a-1ba3-47b4-8f0e-1daaa5ea4a2e.txt new file mode 100644 index 0000000000000000000000000000000000000000..7832893bac8e768a65e0be366de214cc74412539 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/training_log_bf0c5f1a-1ba3-47b4-8f0e-1daaa5ea4a2e.txt @@ -0,0 +1,5614 @@ +[2025-09-05 18:02:25] [Rank 0] PRINT: --- Script Start: Fri Sep 5 18:02:25 2025 --- +[2025-09-05 18:02:25] [Rank 0] PRINT: --- Script Start: Fri Sep 5 18:02:25 2025 --- +[2025-09-05 18:02:25] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 18:02:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 18:02:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 18:02:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 18:02:25] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-05 18:02:25] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-05 18:02:25] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43 +[2025-09-05 18:02:25] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43 +[2025-09-05 18:02:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards, so multi-epoch training simply wraps around
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: SGD+momentum on ALL parameters (uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QKV Attn); "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QKV Attn, W_1 MLP); "
+                         "14: Muon(O Attn)/Adam(QKV Attn, MLP); "
+                         "15: Muon(V Attn)/Adam(QK,O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
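+                    # Modes 11 and 12 are intentionally absent; any value without a
+                    # branch below raises ValueError. Mode 9 runs plain SGD+momentum
+                    # with --sgd_lr (as in this run, per the parsed args logged above).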
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n") # write each message exactly once; a duplicated write here is what doubles every line in the log
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
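+# Worked example for the power-law sampler defined below (illustration only):
+# generate_powerlaw_selection_counts(3) returns
+#   selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#   class_groups     = [0, 1, 2, 2, 3, 3, 3, 3]
+# i.e. one head class with 2**m samples in group 0, then 2**(g-1) classes with
+# 2**(m-g) samples each in group g; with this run's m = 15 the tail group holds
+# 2**14 = 16384 classes with a single sample apiece.
+# ...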
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
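+    # Naming note (qkvo parameterization): W_1 is each block's mlp.c_fc, W_2 is
+    # mlp.c_proj, and W_O is attn.c_proj, so mode 13 gives Muon only the
+    # output-side matrices (W_O, W_2) and keeps the input-side ones
+    # (Q, K, V, W_1) in Adam.
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O.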
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
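+
+    # Sanity-check sketch (commented out; uses only names defined above): every
+    # matrix should end up in exactly one optimizer.
+    # if optimizer2 is not None:
+    #     adam_ids = {id(p) for g in optimizer1.param_groups for p in g["params"]}
+    #     assert adam_ids.isdisjoint(id(p) for p in flat_unique_muon_params), \
+    #         "a parameter was assigned to both Adam and Muon"
+
+    print0(f"PRINT: Optimizers configured.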
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
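+    # Naming note (gated parameterization): the MLP here has three matrices, and
+    # W_1 covers both mlp.c_fc and mlp.c_up (collected into mlp_w1_group above)
+    # while W_2 is mlp.c_proj; W_O is attn.c_proj as before.
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O.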
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # use the CLI LR directly; the bare name muon_lr is only bound in the qkvo branch
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
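+
+    # Optional logging sketch (commented out; uses only names defined above) to
+    # record how many tensors each optimizer ended up owning:
+    # n_muon = len(flat_unique_muon_params) if optimizer2 is not None else 0
+    # n_adam = sum(len(g["params"]) for g in optimizer1.param_groups)
+    # print0(f"PRINT: routed {n_muon} matrices to Muon and {n_adam} tensors to Adam", console=True)
+
+    print0(f"PRINT: Optimizers configured.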
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 18:02:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
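+# A minimal sketch (not from the original run) of producing a shard that
+# _load_data_shard above can read back, assuming the same on-disk layout: a
+# 256-entry int32 header (magic, version, token count) followed by the raw
+# uint16 token ids. The helper name write_data_shard is hypothetical.
+def write_data_shard(path, token_ids):
+    import numpy as np
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520          # magic number asserted by _load_data_shard
+    header[1] = 1                 # version asserted by _load_data_shard
+    header[2] = len(token_ids)    # num_tokens the loader will read back
+    with open(path, "wb") as f:
+        f.write(header.tobytes())                                   # 256 * 4 header bytes
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())   # 2 bytes per token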
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir) # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the line to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
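+    # Worked example of the padding arithmetic in the loop above: a 200-token
+    # item is rounded up to the next multiple of BLOCK_SIZE = 128, i.e.
+    # ((200 + 127) // 128) * 128 = 256, capped at max_eval_len = 4096, and the
+    # model attends over window_blocks = 256 // 128 = 2 blocks; FTA then reads
+    # the logits at position prompt_tokens_len - 1 (the last prompt token) and
+    # compares their argmax against the first token of " " + answer.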
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Summary of the split implemented above (Adam always also takes the head,
+    # embedding and scalar parameters in addition to the matrix sets listed):
+    #   mode 0:  Muon {QK, VO, W1, W2}   Adam {}
+    #   mode 1:  Muon {QK}               Adam {VO, W1, W2}
+    #   mode 2:  Muon {VO}               Adam {QK, W1, W2}
+    #   mode 3:  Muon {QK, VO}           Adam {W1, W2}
+    #   mode 4:  Muon {W1, W2}           Adam {QK, VO}
+    #   mode 5:  Muon {}                 Adam {QK, VO, W1, W2}
+    #   mode 6:  Muon {W2}               Adam {QK, VO, W1}
+    #   mode 7:  Muon {VO, W1, W2}       Adam {QK}
+    #   mode 8:  Muon {VO, W2}           Adam {QK, W1}
+    #   mode 9:  plain SGD+momentum on every parameter
+    #   mode 10: Muon {O, W1, W2}        Adam {QK, V}
+    #   mode 13: Muon {O, W2}            Adam {QK, V, W1}
+    #   mode 14: Muon {O}                Adam {QK, V, W1, W2}
+    #   mode 15: Muon {V}                Adam {QK, O, W1, W2}
+    #   mode 16: Muon {QK, V}            Adam {O, W1, W2}
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam regularization is wanted
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Same mode -> split table as the qkvo branch above; the only difference in
+    # this gated branch is that W1 covers both c_fc and c_up (the gate and up
+    # projections), while W2 remains the c_proj matrix.
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 here if Adam regularization is wanted
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # use exp_args.muon_lr directly: the muon_lr local is only bound in the qkvo branch
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
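+    # Concrete values of the schedules defined above, given num_iterations=10000
+    # and cooldown_frac=0.8: get_lr returns a multiplier of 1.0 through step
+    # 2000, then decays linearly toward 0.1 (get_lr(6000) = 0.55,
+    # get_lr(10000) = 0.1); the attention window grows with training progress
+    # as max(128, next_multiple_of_n(1728 * x, n=128)) tokens, i.e. from one
+    # 128-token block at step 0 up to 1792 tokens (14 blocks) at the end.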
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
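+        # Design note: freezing at most PER_GROUP_K (here 100) line indices per
+        # group once, with a dedicated rng seed, keeps every later call to
+        # run_detailed_evaluation on the identical subset, so the per-group
+        # loss/FTA curves move only because the model changed, not because the
+        # evaluation sample was redrawn. Only lines whose text parses into
+        # "...? Answer: ..." are eligible, matching the FTA extraction regex.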
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
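+        # Validation arithmetic for this configuration, assuming a single-GPU
+        # run (world_size = 1): val_batch_size = 1 * 16384, and val_tokens =
+        # 491520 = 30 * 16384 divides it exactly, so the divisibility warning
+        # above does not fire and the loop runs val_num_steps = 30 full batches.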
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use the simple (unweighted) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
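+        # Illustrative sketch (assumption; resuming is not wired into this script):
+        # a checkpoint saved above could be restored using the same dict layout.
+        # ckpt = torch.load(checkpoint_path, map_location=device)
+        # model_compiled.load_state_dict(ckpt["model"])
+        # for opt, opt_state in zip(optimizers, ckpt["optimizers"]):
+        #     opt.load_state_dict(opt_state)
+        # start_step = ckpt["step"] + 1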
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Gradient clipping for the SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Momentum warmup: ramp linearly from 0.85 to 0.95 over the first 300 steps
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()  # currently unused in the log line below
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 18:02:25] [Rank 0] PRINT: Constructing model...
+[2025-09-05 18:02:26] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 18:02:26] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 18:02:27] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 18:02:31] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 18:02:31] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 18:02:31] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 18:02:31] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 18:02:31] [Rank 0] PRINT: Model returns:
+[2025-09-05 18:02:31] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 18:02:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 18:02:31] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.2).
+[2025-09-05 18:02:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 18:02:31] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 18:02:35] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 18:02:36] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 18:03:15] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 18:03:15] [Rank 0] PRINT: Starting training...
+[2025-09-05 18:03:22] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/fixed_eval_indices.json
+[2025-09-05 18:03:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:03:25] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 18:03:59] [Rank 0] step:21/10000 train_time:33612ms step_avg:1600.57ms
+[2025-09-05 18:04:00] [Rank 0] step:41/10000 train_time:34339ms step_avg:837.53ms
+[2025-09-05 18:04:00] [Rank 0] step:61/10000 train_time:35064ms step_avg:574.82ms
+[2025-09-05 18:04:01] [Rank 0] step:81/10000 train_time:35788ms step_avg:441.83ms
+[2025-09-05 18:04:02] [Rank 0] step:101/10000 train_time:36514ms step_avg:361.52ms
+[2025-09-05 18:04:02] [Rank 0] step:121/10000 train_time:37239ms step_avg:307.76ms
+[2025-09-05 18:04:03] [Rank 0] step:141/10000 train_time:37964ms step_avg:269.25ms
+[2025-09-05 18:04:04] [Rank 0] step:161/10000 train_time:38690ms step_avg:240.31ms
+[2025-09-05 18:04:05] [Rank 0] step:181/10000 train_time:39415ms step_avg:217.76ms
+[2025-09-05 18:04:05] [Rank 0] step:201/10000 train_time:40140ms step_avg:199.70ms
+[2025-09-05 18:04:06] [Rank 0] step:221/10000 train_time:40865ms step_avg:184.91ms
+[2025-09-05 18:04:07] [Rank 0] step:241/10000 train_time:41589ms step_avg:172.57ms
+[2025-09-05 18:04:07] [Rank 0] step:261/10000 train_time:42314ms step_avg:162.12ms
+[2025-09-05 18:04:08] [Rank 0] step:281/10000 train_time:43040ms step_avg:153.17ms
+[2025-09-05 18:04:09] [Rank 0] step:301/10000 train_time:43766ms step_avg:145.40ms
+[2025-09-05 18:04:10] [Rank 0] step:321/10000 train_time:44491ms step_avg:138.60ms
+[2025-09-05 18:04:10] [Rank 0] step:341/10000 train_time:45224ms step_avg:132.62ms
+[2025-09-05 18:04:11] [Rank 0] step:361/10000 train_time:45949ms step_avg:127.28ms
+[2025-09-05 18:04:12] [Rank 0] step:381/10000 train_time:46674ms step_avg:122.50ms
+[2025-09-05 18:04:13] [Rank 0] step:401/10000 train_time:47399ms step_avg:118.20ms
+[2025-09-05 18:04:13] [Rank 0] step:421/10000 train_time:48124ms step_avg:114.31ms
+[2025-09-05 18:04:14] [Rank 0] step:441/10000 train_time:48849ms step_avg:110.77ms
+[2025-09-05 18:04:15] [Rank 0] step:461/10000 train_time:49574ms step_avg:107.54ms
+[2025-09-05 18:04:15] [Rank 0] step:481/10000 train_time:50300ms step_avg:104.57ms
+[2025-09-05 18:04:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:04:17] [Rank 0] PRINT: step:500/10000 train_loss:4.6433 val_loss:3.2658 train_time:51105ms step_avg:102.21ms
+[2025-09-05 18:04:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:04:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:05:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:05:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:05:38] [Rank 0] Total Loss: 5.2823
+[2025-09-05 18:05:38] [Rank 0] Total FTA (Unweighted): 0.1019
+[2025-09-05 18:05:38] [Rank 0] Total FTA (Weighted): 0.1019
+[2025-09-05 18:05:38] [Rank 0] Group 0 Loss: 3.0650
+[2025-09-05 18:05:38] [Rank 0] Group 1 Loss: 3.1464
+[2025-09-05 18:05:38] [Rank 0] Group 2 Loss: 3.4839
+[2025-09-05 18:05:38] [Rank 0] Group 3 Loss: 4.1717
+[2025-09-05 18:05:38] [Rank 0] Group 4 Loss: 5.1001
+[2025-09-05 18:05:38] [Rank 0] Group 5 Loss: 5.4386
+[2025-09-05 18:05:38] [Rank 0] Group 6 Loss: 5.7397
+[2025-09-05 18:05:38] [Rank 0] Group 7 Loss: 5.7698
+[2025-09-05 18:05:38] [Rank 0] Group 8 Loss: 5.9808
+[2025-09-05 18:05:38] [Rank 0] Group 9 Loss: 6.1331
+[2025-09-05 18:05:38] [Rank 0] Group 10 Loss: 6.1079
+[2025-09-05 18:05:38] [Rank 0] Group 11 Loss: 6.1949
+[2025-09-05 18:05:38] [Rank 0] Group 12 Loss: 6.0114
+[2025-09-05 18:05:38] [Rank 0] Group 13 Loss: 6.0084
+[2025-09-05 18:05:38] [Rank 0] Group 14 Loss: 6.1269
+[2025-09-05 18:05:38] [Rank 0] Group 15 Loss: 6.0383
+[2025-09-05 18:05:38] [Rank 0] Group 0 FTA: 0.0000
+[2025-09-05 18:05:38] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 18:05:38] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 18:05:38] [Rank 0] Group 3 FTA: 0.0800
+[2025-09-05 18:05:38] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 18:05:38] [Rank 0] Group 5 FTA: 0.0900
+[2025-09-05 18:05:38] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 18:05:38] [Rank 0] Group 7 FTA: 0.0800
+[2025-09-05 18:05:38] [Rank 0] Group 8 FTA: 0.1300
+[2025-09-05 18:05:38] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-05 18:05:38] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-05 18:05:38] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 18:05:38] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 18:05:38] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 18:05:38] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 18:05:38] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 18:05:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:05:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:05:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:05:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:05:40] [Rank 0] step:501/10000 train_time:51115ms step_avg:102.03ms
+[2025-09-05 18:05:40] [Rank 0] step:521/10000 train_time:51776ms step_avg:99.38ms
+[2025-09-05 18:05:41] [Rank 0] step:541/10000 train_time:52501ms step_avg:97.05ms
+[2025-09-05 18:05:42] [Rank 0] step:561/10000 train_time:53231ms step_avg:94.89ms
+[2025-09-05 18:05:42] [Rank 0] step:581/10000 train_time:53956ms step_avg:92.87ms
+[2025-09-05 18:05:43] [Rank 0] step:601/10000 train_time:54680ms step_avg:90.98ms
+[2025-09-05 18:05:44] [Rank 0] step:621/10000 train_time:55405ms step_avg:89.22ms
+[2025-09-05 18:05:45] [Rank 0] step:641/10000 train_time:56130ms step_avg:87.57ms
+[2025-09-05 18:05:45] [Rank 0] step:661/10000 train_time:56854ms step_avg:86.01ms
+[2025-09-05 18:05:46] [Rank 0] step:681/10000 train_time:57703ms step_avg:84.73ms
+[2025-09-05 18:05:47] [Rank 0] step:701/10000 train_time:58428ms step_avg:83.35ms
+[2025-09-05 18:05:48] [Rank 0] step:721/10000 train_time:59153ms step_avg:82.04ms
+[2025-09-05 18:05:49] [Rank 0] step:741/10000 train_time:60026ms step_avg:81.01ms
+[2025-09-05 18:05:49] [Rank 0] step:761/10000 train_time:60755ms step_avg:79.84ms
+[2025-09-05 18:05:50] [Rank 0] step:781/10000 train_time:61485ms step_avg:78.73ms
+[2025-09-05 18:05:51] [Rank 0] step:801/10000 train_time:62215ms step_avg:77.67ms
+[2025-09-05 18:05:52] [Rank 0] step:821/10000 train_time:63572ms step_avg:77.43ms
+[2025-09-05 18:05:53] [Rank 0] step:841/10000 train_time:64302ms step_avg:76.46ms
+[2025-09-05 18:05:54] [Rank 0] step:861/10000 train_time:65032ms step_avg:75.53ms
+[2025-09-05 18:05:54] [Rank 0] step:881/10000 train_time:65762ms step_avg:74.64ms
+[2025-09-05 18:05:55] [Rank 0] step:901/10000 train_time:66493ms step_avg:73.80ms
+[2025-09-05 18:05:56] [Rank 0] step:921/10000 train_time:67222ms step_avg:72.99ms
+[2025-09-05 18:05:56] [Rank 0] step:941/10000 train_time:67951ms step_avg:72.21ms
+[2025-09-05 18:05:57] [Rank 0] step:961/10000 train_time:68681ms step_avg:71.47ms
+[2025-09-05 18:05:58] [Rank 0] step:981/10000 train_time:69411ms step_avg:70.76ms
+[2025-09-05 18:05:59] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:05:59] [Rank 0] PRINT: step:1000/10000 train_loss:2.9093 val_loss:2.6182 train_time:70222ms step_avg:70.22ms
+[2025-09-05 18:05:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:05:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:07:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:07:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:07:20] [Rank 0] Total Loss: 4.7855
+[2025-09-05 18:07:20] [Rank 0] Total FTA (Unweighted): 0.2100
+[2025-09-05 18:07:20] [Rank 0] Total FTA (Weighted): 0.2100
+[2025-09-05 18:07:20] [Rank 0] Group 0 Loss: 3.0426
+[2025-09-05 18:07:20] [Rank 0] Group 1 Loss: 2.9874
+[2025-09-05 18:07:20] [Rank 0] Group 2 Loss: 3.1684
+[2025-09-05 18:07:20] [Rank 0] Group 3 Loss: 3.6364
+[2025-09-05 18:07:20] [Rank 0] Group 4 Loss: 4.1760
+[2025-09-05 18:07:20] [Rank 0] Group 5 Loss: 4.6883
+[2025-09-05 18:07:20] [Rank 0] Group 6 Loss: 5.0291
+[2025-09-05 18:07:20] [Rank 0] Group 7 Loss: 5.1621
+[2025-09-05 18:07:20] [Rank 0] Group 8 Loss: 5.4735
+[2025-09-05 18:07:20] [Rank 0] Group 9 Loss: 5.5791
+[2025-09-05 18:07:20] [Rank 0] Group 10 Loss: 5.6211
+[2025-09-05 18:07:20] [Rank 0] Group 11 Loss: 5.6658
+[2025-09-05 18:07:20] [Rank 0] Group 12 Loss: 5.5556
+[2025-09-05 18:07:20] [Rank 0] Group 13 Loss: 5.5583
+[2025-09-05 18:07:20] [Rank 0] Group 14 Loss: 5.6474
+[2025-09-05 18:07:20] [Rank 0] Group 15 Loss: 5.5765
+[2025-09-05 18:07:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:07:20] [Rank 0] Group 1 FTA: 0.6500
+[2025-09-05 18:07:20] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 18:07:20] [Rank 0] Group 3 FTA: 0.1200
+[2025-09-05 18:07:20] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-05 18:07:20] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 18:07:20] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 18:07:20] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 18:07:20] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-05 18:07:20] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 18:07:20] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 18:07:20] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 18:07:20] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 18:07:20] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 18:07:20] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 18:07:20] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 18:07:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:07:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:07:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:07:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:07:22] [Rank 0] step:1001/10000 train_time:70231ms step_avg:70.16ms
+[2025-09-05 18:07:23] [Rank 0] step:1021/10000 train_time:70902ms step_avg:69.44ms
+[2025-09-05 18:07:24] [Rank 0] step:1041/10000 train_time:71633ms step_avg:68.81ms
+[2025-09-05 18:07:24] [Rank 0] step:1061/10000 train_time:72363ms step_avg:68.20ms
+[2025-09-05 18:07:25] [Rank 0] step:1081/10000 train_time:73095ms step_avg:67.62ms
+[2025-09-05 18:07:26] [Rank 0] step:1101/10000 train_time:73825ms step_avg:67.05ms
+[2025-09-05 18:07:26] [Rank 0] step:1121/10000 train_time:74555ms step_avg:66.51ms
+[2025-09-05 18:07:27] [Rank 0] step:1141/10000 train_time:75286ms step_avg:65.98ms
+[2025-09-05 18:07:28] [Rank 0] step:1161/10000 train_time:76016ms step_avg:65.47ms
+[2025-09-05 18:07:29] [Rank 0] step:1181/10000 train_time:76745ms step_avg:64.98ms
+[2025-09-05 18:07:29] [Rank 0] step:1201/10000 train_time:77476ms step_avg:64.51ms
+[2025-09-05 18:07:30] [Rank 0] step:1221/10000 train_time:78206ms step_avg:64.05ms
+[2025-09-05 18:07:31] [Rank 0] step:1241/10000 train_time:78937ms step_avg:63.61ms
+[2025-09-05 18:07:32] [Rank 0] step:1261/10000 train_time:79667ms step_avg:63.18ms
+[2025-09-05 18:07:32] [Rank 0] step:1281/10000 train_time:80397ms step_avg:62.76ms
+[2025-09-05 18:07:33] [Rank 0] step:1301/10000 train_time:81128ms step_avg:62.36ms
+[2025-09-05 18:07:34] [Rank 0] step:1321/10000 train_time:81858ms step_avg:61.97ms
+[2025-09-05 18:07:34] [Rank 0] step:1341/10000 train_time:82587ms step_avg:61.59ms
+[2025-09-05 18:07:35] [Rank 0] step:1361/10000 train_time:83317ms step_avg:61.22ms
+[2025-09-05 18:07:36] [Rank 0] step:1381/10000 train_time:84048ms step_avg:60.86ms
+[2025-09-05 18:07:37] [Rank 0] step:1401/10000 train_time:84777ms step_avg:60.51ms
+[2025-09-05 18:07:37] [Rank 0] step:1421/10000 train_time:85508ms step_avg:60.17ms
+[2025-09-05 18:07:38] [Rank 0] step:1441/10000 train_time:86238ms step_avg:59.85ms
+[2025-09-05 18:07:39] [Rank 0] step:1461/10000 train_time:86968ms step_avg:59.53ms
+[2025-09-05 18:07:40] [Rank 0] step:1481/10000 train_time:87697ms step_avg:59.22ms
+[2025-09-05 18:07:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:07:41] [Rank 0] PRINT: step:1500/10000 train_loss:2.4566 val_loss:2.3109 train_time:88509ms step_avg:59.01ms
+[2025-09-05 18:07:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:07:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:09:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:09:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:09:02] [Rank 0] Total Loss: 4.5300
+[2025-09-05 18:09:02] [Rank 0] Total FTA (Unweighted): 0.2625
+[2025-09-05 18:09:02] [Rank 0] Total FTA (Weighted): 0.2625
+[2025-09-05 18:09:02] [Rank 0] Group 0 Loss: 3.0516
+[2025-09-05 18:09:02] [Rank 0] Group 1 Loss: 2.9185
+[2025-09-05 18:09:02] [Rank 0] Group 2 Loss: 3.0654
+[2025-09-05 18:09:02] [Rank 0] Group 3 Loss: 3.4501
+[2025-09-05 18:09:02] [Rank 0] Group 4 Loss: 3.8107
+[2025-09-05 18:09:02] [Rank 0] Group 5 Loss: 4.3299
+[2025-09-05 18:09:02] [Rank 0] Group 6 Loss: 4.6716
+[2025-09-05 18:09:02] [Rank 0] Group 7 Loss: 4.8181
+[2025-09-05 18:09:02] [Rank 0] Group 8 Loss: 5.1422
+[2025-09-05 18:09:02] [Rank 0] Group 9 Loss: 5.2623
+[2025-09-05 18:09:02] [Rank 0] Group 10 Loss: 5.3199
+[2025-09-05 18:09:02] [Rank 0] Group 11 Loss: 5.3679
+[2025-09-05 18:09:02] [Rank 0] Group 12 Loss: 5.2731
+[2025-09-05 18:09:02] [Rank 0] Group 13 Loss: 5.3130
+[2025-09-05 18:09:02] [Rank 0] Group 14 Loss: 5.3546
+[2025-09-05 18:09:02] [Rank 0] Group 15 Loss: 5.3309
+[2025-09-05 18:09:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:09:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:09:02] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 18:09:02] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 18:09:02] [Rank 0] Group 4 FTA: 0.2100
+[2025-09-05 18:09:02] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 18:09:02] [Rank 0] Group 6 FTA: 0.2000
+[2025-09-05 18:09:02] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-05 18:09:02] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 18:09:02] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 18:09:02] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 18:09:02] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 18:09:02] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 18:09:02] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 18:09:02] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 18:09:02] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 18:09:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:09:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:09:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:09:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:09:03] [Rank 0] step:1501/10000 train_time:88518ms step_avg:58.97ms
+[2025-09-05 18:09:04] [Rank 0] step:1521/10000 train_time:89182ms step_avg:58.63ms
+[2025-09-05 18:09:05] [Rank 0] step:1541/10000 train_time:89913ms step_avg:58.35ms
+[2025-09-05 18:09:06] [Rank 0] step:1561/10000 train_time:90644ms step_avg:58.07ms
+[2025-09-05 18:09:06] [Rank 0] step:1581/10000 train_time:91375ms step_avg:57.80ms
+[2025-09-05 18:09:07] [Rank 0] step:1601/10000 train_time:92106ms step_avg:57.53ms
+[2025-09-05 18:09:08] [Rank 0] step:1621/10000 train_time:92836ms step_avg:57.27ms
+[2025-09-05 18:09:09] [Rank 0] step:1641/10000 train_time:93769ms step_avg:57.14ms
+[2025-09-05 18:09:09] [Rank 0] step:1661/10000 train_time:94499ms step_avg:56.89ms
+[2025-09-05 18:09:10] [Rank 0] step:1681/10000 train_time:95230ms step_avg:56.65ms
+[2025-09-05 18:09:11] [Rank 0] step:1701/10000 train_time:95960ms step_avg:56.41ms
+[2025-09-05 18:09:12] [Rank 0] step:1721/10000 train_time:96691ms step_avg:56.18ms
+[2025-09-05 18:09:12] [Rank 0] step:1741/10000 train_time:97421ms step_avg:55.96ms
+[2025-09-05 18:09:13] [Rank 0] step:1761/10000 train_time:98151ms step_avg:55.74ms
+[2025-09-05 18:09:14] [Rank 0] step:1781/10000 train_time:98882ms step_avg:55.52ms
+[2025-09-05 18:09:15] [Rank 0] step:1801/10000 train_time:99613ms step_avg:55.31ms
+[2025-09-05 18:09:15] [Rank 0] step:1821/10000 train_time:100343ms step_avg:55.10ms
+[2025-09-05 18:09:16] [Rank 0] step:1841/10000 train_time:101073ms step_avg:54.90ms
+[2025-09-05 18:09:17] [Rank 0] step:1861/10000 train_time:101804ms step_avg:54.70ms
+[2025-09-05 18:09:17] [Rank 0] step:1881/10000 train_time:102535ms step_avg:54.51ms
+[2025-09-05 18:09:18] [Rank 0] step:1901/10000 train_time:103265ms step_avg:54.32ms
+[2025-09-05 18:09:19] [Rank 0] step:1921/10000 train_time:103995ms step_avg:54.14ms
+[2025-09-05 18:09:20] [Rank 0] step:1941/10000 train_time:104725ms step_avg:53.95ms
+[2025-09-05 18:09:20] [Rank 0] step:1961/10000 train_time:105457ms step_avg:53.78ms
+[2025-09-05 18:09:21] [Rank 0] step:1981/10000 train_time:106186ms step_avg:53.60ms
+[2025-09-05 18:09:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:09:23] [Rank 0] PRINT: step:2000/10000 train_loss:2.2202 val_loss:2.1225 train_time:106997ms step_avg:53.50ms
+[2025-09-05 18:09:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:09:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:10:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:10:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:10:44] [Rank 0] Total Loss: 4.4230
+[2025-09-05 18:10:44] [Rank 0] Total FTA (Unweighted): 0.2812
+[2025-09-05 18:10:44] [Rank 0] Total FTA (Weighted): 0.2812
+[2025-09-05 18:10:44] [Rank 0] Group 0 Loss: 3.1266
+[2025-09-05 18:10:44] [Rank 0] Group 1 Loss: 2.9221
+[2025-09-05 18:10:44] [Rank 0] Group 2 Loss: 3.0615
+[2025-09-05 18:10:44] [Rank 0] Group 3 Loss: 3.4336
+[2025-09-05 18:10:44] [Rank 0] Group 4 Loss: 3.7024
+[2025-09-05 18:10:44] [Rank 0] Group 5 Loss: 4.1367
+[2025-09-05 18:10:44] [Rank 0] Group 6 Loss: 4.4698
+[2025-09-05 18:10:44] [Rank 0] Group 7 Loss: 4.6673
+[2025-09-05 18:10:44] [Rank 0] Group 8 Loss: 4.9899
+[2025-09-05 18:10:44] [Rank 0] Group 9 Loss: 5.0952
+[2025-09-05 18:10:44] [Rank 0] Group 10 Loss: 5.1673
+[2025-09-05 18:10:44] [Rank 0] Group 11 Loss: 5.2105
+[2025-09-05 18:10:44] [Rank 0] Group 12 Loss: 5.1565
+[2025-09-05 18:10:44] [Rank 0] Group 13 Loss: 5.2087
+[2025-09-05 18:10:44] [Rank 0] Group 14 Loss: 5.2359
+[2025-09-05 18:10:44] [Rank 0] Group 15 Loss: 5.1834
+[2025-09-05 18:10:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:10:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:10:44] [Rank 0] Group 2 FTA: 0.4800
+[2025-09-05 18:10:44] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 18:10:44] [Rank 0] Group 4 FTA: 0.2200
+[2025-09-05 18:10:44] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 18:10:44] [Rank 0] Group 6 FTA: 0.2400
+[2025-09-05 18:10:44] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-05 18:10:44] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 18:10:44] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 18:10:44] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-05 18:10:44] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 18:10:44] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 18:10:44] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 18:10:44] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 18:10:44] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 18:10:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:10:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:10:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:10:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:10:46] [Rank 0] step:2001/10000 train_time:107007ms step_avg:53.48ms
+[2025-09-05 18:10:47] [Rank 0] step:2021/10000 train_time:107876ms step_avg:53.38ms
+[2025-09-05 18:10:47] [Rank 0] step:2041/10000 train_time:108607ms step_avg:53.21ms
+[2025-09-05 18:10:48] [Rank 0] step:2061/10000 train_time:109337ms step_avg:53.05ms
+[2025-09-05 18:10:49] [Rank 0] step:2081/10000 train_time:110067ms step_avg:52.89ms
+[2025-09-05 18:10:50] [Rank 0] step:2101/10000 train_time:110798ms step_avg:52.74ms
+[2025-09-05 18:10:50] [Rank 0] step:2121/10000 train_time:111528ms step_avg:52.58ms
+[2025-09-05 18:10:51] [Rank 0] step:2141/10000 train_time:112259ms step_avg:52.43ms
+[2025-09-05 18:10:52] [Rank 0] step:2161/10000 train_time:112989ms step_avg:52.29ms
+[2025-09-05 18:10:53] [Rank 0] step:2181/10000 train_time:113719ms step_avg:52.14ms
+[2025-09-05 18:10:53] [Rank 0] step:2201/10000 train_time:114449ms step_avg:52.00ms
+[2025-09-05 18:10:54] [Rank 0] step:2221/10000 train_time:115180ms step_avg:51.86ms
+[2025-09-05 18:10:55] [Rank 0] step:2241/10000 train_time:115915ms step_avg:51.72ms
+[2025-09-05 18:10:56] [Rank 0] step:2261/10000 train_time:116652ms step_avg:51.59ms
+[2025-09-05 18:10:56] [Rank 0] step:2281/10000 train_time:117389ms step_avg:51.46ms
+[2025-09-05 18:10:57] [Rank 0] step:2301/10000 train_time:118125ms step_avg:51.34ms
+[2025-09-05 18:10:58] [Rank 0] step:2321/10000 train_time:118862ms step_avg:51.21ms
+[2025-09-05 18:10:58] [Rank 0] step:2341/10000 train_time:119599ms step_avg:51.09ms
+[2025-09-05 18:10:59] [Rank 0] step:2361/10000 train_time:120336ms step_avg:50.97ms
+[2025-09-05 18:11:00] [Rank 0] step:2381/10000 train_time:121072ms step_avg:50.85ms
+[2025-09-05 18:11:01] [Rank 0] step:2401/10000 train_time:121809ms step_avg:50.73ms
+[2025-09-05 18:11:02] [Rank 0] step:2421/10000 train_time:122545ms step_avg:50.62ms
+[2025-09-05 18:11:02] [Rank 0] step:2441/10000 train_time:123419ms step_avg:50.56ms
+[2025-09-05 18:11:03] [Rank 0] step:2461/10000 train_time:124156ms step_avg:50.45ms
+[2025-09-05 18:11:04] [Rank 0] step:2481/10000 train_time:124893ms step_avg:50.34ms
PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:11:05] [Rank 0] PRINT: step:2500/10000 train_loss:2.0647 val_loss:1.9899 train_time:125918ms step_avg:50.37ms
+[2025-09-05 18:11:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:11:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:12:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:12:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:12:27] [Rank 0] Total Loss: 4.3675
+[2025-09-05 18:12:27] [Rank 0] Total FTA (Unweighted): 0.3100
+[2025-09-05 18:12:27] [Rank 0] Total FTA (Weighted): 0.3100
+[2025-09-05 18:12:27] [Rank 0] Group 0 Loss: 3.1620
+[2025-09-05 18:12:27] [Rank 0] Group 1 Loss: 2.9646
+[2025-09-05 18:12:27] [Rank 0] Group 2 Loss: 3.0927
+[2025-09-05 18:12:27] [Rank 0] Group 3 Loss: 3.4220
+[2025-09-05 18:12:27] [Rank 0] Group 4 Loss: 3.6938
+[2025-09-05 18:12:27] [Rank 0] Group 5 Loss: 4.0730
+[2025-09-05 18:12:27] [Rank 0] Group 6 Loss: 4.3913
+[2025-09-05 18:12:27] [Rank 0] Group 7 Loss: 4.5452
+[2025-09-05 18:12:27] [Rank 0] Group 8 Loss: 4.8765
+[2025-09-05 18:12:27] [Rank 0] Group 9 Loss: 5.0350
+[2025-09-05 18:12:27] [Rank 0] Group 10 Loss: 5.1015
+[2025-09-05 18:12:27] [Rank 0] Group 11 Loss: 5.1353
+[2025-09-05 18:12:27] [Rank 0] Group 12 Loss: 5.0347
+[2025-09-05 18:12:27] [Rank 0] Group 13 Loss: 5.1224
+[2025-09-05 18:12:27] [Rank 0] Group 14 Loss: 5.1329
+[2025-09-05 18:12:27] [Rank 0] Group 15 Loss: 5.0973
+[2025-09-05 18:12:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:12:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:12:27] [Rank 0] Group 2 FTA: 0.7200
+[2025-09-05 18:12:27] [Rank 0] Group 3 FTA: 0.2000
+[2025-09-05 18:12:27] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 18:12:27] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 18:12:27] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 18:12:27] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 18:12:27] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 18:12:27] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 18:12:27] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 18:12:27] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-05 18:12:27] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 18:12:27] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 18:12:27] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 18:12:27] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 18:12:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:12:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:12:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:12:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:12:28] [Rank 0] step:2501/10000 train_time:125928ms step_avg:50.35ms
+[2025-09-05 18:12:29] [Rank 0] step:2521/10000 train_time:126592ms step_avg:50.21ms
+[2025-09-05 18:12:30] [Rank 0] step:2541/10000 train_time:127329ms step_avg:50.11ms
+[2025-09-05 18:12:30] [Rank 0] step:2561/10000 train_time:128066ms step_avg:50.01ms
+[2025-09-05 18:12:31] [Rank 0] step:2581/10000 train_time:128804ms step_avg:49.90ms
+[2025-09-05 18:12:32] [Rank 0] step:2601/10000 train_time:129541ms step_avg:49.80ms
+[2025-09-05 18:12:33] [Rank 0] step:2621/10000 train_time:130277ms step_avg:49.71ms
+[2025-09-05 18:12:33] [Rank 0] step:2641/10000 train_time:131014ms step_avg:49.61ms
+[2025-09-05 18:12:34] [Rank 0] step:2661/10000 train_time:131752ms step_avg:49.51ms
+[2025-09-05 18:12:35] [Rank 0] step:2681/10000 train_time:132489ms step_avg:49.42ms
+[2025-09-05 18:12:36] [Rank 0] step:2701/10000 train_time:133225ms step_avg:49.32ms
+[2025-09-05 18:12:36] [Rank 0] step:2721/10000 train_time:133962ms step_avg:49.23ms
+[2025-09-05 18:12:37] [Rank 0] step:2741/10000 train_time:134699ms step_avg:49.14ms
+[2025-09-05 18:12:38] [Rank 0] step:2761/10000 train_time:135436ms step_avg:49.05ms
+[2025-09-05 18:12:39] [Rank 0] step:2781/10000 train_time:136172ms step_avg:48.97ms
+[2025-09-05 18:12:39] [Rank 0] step:2801/10000 train_time:136909ms step_avg:48.88ms
+[2025-09-05 18:12:41] [Rank 0] step:2821/10000 train_time:138253ms step_avg:49.01ms
+[2025-09-05 18:12:41] [Rank 0] step:2841/10000 train_time:138990ms step_avg:48.92ms
+[2025-09-05 18:12:42] [Rank 0] step:2861/10000 train_time:139727ms step_avg:48.84ms
+[2025-09-05 18:12:43] [Rank 0] step:2881/10000 train_time:140463ms step_avg:48.76ms
+[2025-09-05 18:12:44] [Rank 0] step:2901/10000 train_time:141199ms step_avg:48.67ms
+[2025-09-05 18:12:44] [Rank 0] step:2921/10000 train_time:141936ms step_avg:48.59ms
+[2025-09-05 18:12:45] [Rank 0] step:2941/10000 train_time:142672ms step_avg:48.51ms
+[2025-09-05 18:12:46] [Rank 0] step:2961/10000 train_time:143409ms step_avg:48.43ms
+[2025-09-05 18:12:47] [Rank 0] step:2981/10000 train_time:144145ms step_avg:48.35ms
+[2025-09-05 18:12:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
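The recurring warning above is simple integer arithmetic: 491520 = 7 × 65536 + 32768, so a validation loop that only consumes full 65536-token batches covers 458752 tokens and skips the 32768-token remainder. A minimal sketch of that check, with a hypothetical helper name (the actual validation loop is not part of this log):

```python
# Sketch of the divisibility check behind the warning. The function name and
# structure are illustrative assumptions, not taken from the training script.
def plan_val_batches(val_tokens: int, val_batch_size: int) -> int:
    """Return the number of full validation batches, warning about leftovers."""
    n_full, leftover = divmod(val_tokens, val_batch_size)
    if leftover:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
    return n_full

# 491520 = 7 * 65536 + 32768 -> 7 full batches; 32768 tokens are skipped.
assert plan_val_batches(491520, 65536) == 7
```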
+[2025-09-05 18:12:48] [Rank 0] PRINT: step:3000/10000 train_loss:1.9541 val_loss:1.9037 train_time:144963ms step_avg:48.32ms
+[2025-09-05 18:12:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:12:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:14:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:14:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:14:09] [Rank 0] Total Loss: 4.2340
+[2025-09-05 18:14:09] [Rank 0] Total FTA (Unweighted): 0.3363
+[2025-09-05 18:14:09] [Rank 0] Total FTA (Weighted): 0.3362
+[2025-09-05 18:14:09] [Rank 0] Group 0 Loss: 3.1366
+[2025-09-05 18:14:09] [Rank 0] Group 1 Loss: 2.9048
+[2025-09-05 18:14:09] [Rank 0] Group 2 Loss: 3.0070
+[2025-09-05 18:14:09] [Rank 0] Group 3 Loss: 3.3112
+[2025-09-05 18:14:09] [Rank 0] Group 4 Loss: 3.5625
+[2025-09-05 18:14:09] [Rank 0] Group 5 Loss: 3.8993
+[2025-09-05 18:14:09] [Rank 0] Group 6 Loss: 4.1823
+[2025-09-05 18:14:09] [Rank 0] Group 7 Loss: 4.4092
+[2025-09-05 18:14:09] [Rank 0] Group 8 Loss: 4.7132
+[2025-09-05 18:14:09] [Rank 0] Group 9 Loss: 4.8507
+[2025-09-05 18:14:09] [Rank 0] Group 10 Loss: 4.9427
+[2025-09-05 18:14:09] [Rank 0] Group 11 Loss: 4.9644
+[2025-09-05 18:14:09] [Rank 0] Group 12 Loss: 4.9162
+[2025-09-05 18:14:09] [Rank 0] Group 13 Loss: 4.9889
+[2025-09-05 18:14:09] [Rank 0] Group 14 Loss: 5.0014
+[2025-09-05 18:14:09] [Rank 0] Group 15 Loss: 4.9540
+[2025-09-05 18:14:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:14:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:14:09] [Rank 0] Group 2 FTA: 0.9000
+[2025-09-05 18:14:09] [Rank 0] Group 3 FTA: 0.2800
+[2025-09-05 18:14:09] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 18:14:09] [Rank 0] Group 5 FTA: 0.3300
+[2025-09-05 18:14:09] [Rank 0] Group 6 FTA: 0.3100
+[2025-09-05 18:14:09] [Rank 0] Group 7 FTA: 0.1700
+[2025-09-05 18:14:09] [Rank 0] Group 8 FTA: 0.2400
+[2025-09-05 18:14:09] [Rank 0] Group 9 FTA: 0.1900
+[2025-09-05 18:14:09] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 18:14:09] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 18:14:09] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 18:14:09] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 18:14:09] [Rank 0] Group 14 FTA: 0.0800
+[2025-09-05 18:14:09] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 18:14:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:14:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:14:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:14:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:14:11] [Rank 0] step:3001/10000 train_time:144973ms step_avg:48.31ms
+[2025-09-05 18:14:12] [Rank 0] step:3021/10000 train_time:145735ms step_avg:48.24ms
+[2025-09-05 18:14:12] [Rank 0] step:3041/10000 train_time:146472ms step_avg:48.17ms
+[2025-09-05 18:14:13] [Rank 0] step:3061/10000 train_time:147209ms step_avg:48.09ms
+[2025-09-05 18:14:14] [Rank 0] step:3081/10000 train_time:148063ms step_avg:48.06ms
+[2025-09-05 18:14:15] [Rank 0] step:3101/10000 train_time:148800ms step_avg:47.98ms
+[2025-09-05 18:14:15] [Rank 0] step:3121/10000 train_time:149536ms step_avg:47.91ms
+[2025-09-05 18:14:16] [Rank 0] step:3141/10000 train_time:150273ms step_avg:47.84ms
+[2025-09-05 18:14:17] [Rank 0] step:3161/10000 train_time:151010ms step_avg:47.77ms
+[2025-09-05 18:14:18] [Rank 0] step:3181/10000 train_time:151748ms step_avg:47.70ms
+[2025-09-05 18:14:18] [Rank 0] step:3201/10000 train_time:152484ms step_avg:47.64ms
+[2025-09-05 18:14:19] [Rank 0] step:3221/10000 train_time:153221ms step_avg:47.57ms
+[2025-09-05 18:14:20] [Rank 0] step:3241/10000 train_time:153958ms step_avg:47.50ms
+[2025-09-05 18:14:21] [Rank 0] step:3261/10000 train_time:154694ms step_avg:47.44ms
+[2025-09-05 18:14:21] [Rank 0] step:3281/10000 train_time:155431ms step_avg:47.37ms
+[2025-09-05 18:14:22] [Rank 0] step:3301/10000 train_time:156168ms step_avg:47.31ms
+[2025-09-05 18:14:23] [Rank 0] step:3321/10000 train_time:156905ms step_avg:47.25ms
+[2025-09-05 18:14:24] [Rank 0] step:3341/10000 train_time:157642ms step_avg:47.18ms
+[2025-09-05 18:14:24] [Rank 0] step:3361/10000 train_time:158378ms step_avg:47.12ms
+[2025-09-05 18:14:25] [Rank 0] step:3381/10000 train_time:159115ms step_avg:47.06ms
+[2025-09-05 18:14:26] [Rank 0] step:3401/10000 train_time:159851ms step_avg:47.00ms
+[2025-09-05 18:14:27] [Rank 0] step:3421/10000 train_time:160588ms step_avg:46.94ms
+[2025-09-05 18:14:27] [Rank 0] step:3441/10000 train_time:161324ms step_avg:46.88ms
+[2025-09-05 18:14:28] [Rank 0] step:3461/10000 train_time:162061ms step_avg:46.83ms
+[2025-09-05 18:14:29] [Rank 0] step:3481/10000 train_time:162798ms step_avg:46.77ms
+[2025-09-05 18:14:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
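The two totals in the step-3000 block above are an unweighted mean of the 16 per-group FTA scores and a sample-weighted mean over the 1600 fixed-eval samples; with (near-)equal group sizes the two coincide up to floating-point rounding, which is why the log shows 0.3363 vs 0.3362. A sketch of both aggregations, where the equal 100-per-group split is an assumption (the log reports only the scores):

```python
# Unweighted vs. sample-weighted aggregation of per-group first-token accuracy
# (FTA), using the step-3000 values above. The 100-samples-per-group split is
# an illustrative assumption (1600 samples / 16 groups).
group_fta = [1.0000, 1.0000, 0.9000, 0.2800, 0.2500, 0.3300, 0.3100, 0.1700,
             0.2400, 0.1900, 0.1800, 0.1800, 0.1100, 0.1100, 0.0800, 0.0500]
group_size = [100] * 16

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_size)) / sum(group_size)
# Both equal 5.38/16 = 0.33625; the printed last digit can wobble with float
# rounding, matching the 0.3363 / 0.3362 pair in the log.
print(f"{unweighted:.4f} {weighted:.4f}")
```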
+[2025-09-05 18:14:30] [Rank 0] PRINT: step:3500/10000 train_loss:1.8777 val_loss:1.8445 train_time:163615ms step_avg:46.75ms
+[2025-09-05 18:14:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:14:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:15:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:15:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:15:51] [Rank 0] Total Loss: 4.2348
+[2025-09-05 18:15:51] [Rank 0] Total FTA (Unweighted): 0.3731
+[2025-09-05 18:15:51] [Rank 0] Total FTA (Weighted): 0.3731
+[2025-09-05 18:15:51] [Rank 0] Group 0 Loss: 3.1551
+[2025-09-05 18:15:51] [Rank 0] Group 1 Loss: 2.9801
+[2025-09-05 18:15:51] [Rank 0] Group 2 Loss: 3.0159
+[2025-09-05 18:15:51] [Rank 0] Group 3 Loss: 3.3137
+[2025-09-05 18:15:51] [Rank 0] Group 4 Loss: 3.5572
+[2025-09-05 18:15:51] [Rank 0] Group 5 Loss: 3.8884
+[2025-09-05 18:15:51] [Rank 0] Group 6 Loss: 4.1960
+[2025-09-05 18:15:51] [Rank 0] Group 7 Loss: 4.3769
+[2025-09-05 18:15:51] [Rank 0] Group 8 Loss: 4.7028
+[2025-09-05 18:15:51] [Rank 0] Group 9 Loss: 4.8381
+[2025-09-05 18:15:51] [Rank 0] Group 10 Loss: 4.9394
+[2025-09-05 18:15:51] [Rank 0] Group 11 Loss: 4.9793
+[2025-09-05 18:15:51] [Rank 0] Group 12 Loss: 4.9063
+[2025-09-05 18:15:51] [Rank 0] Group 13 Loss: 4.9872
+[2025-09-05 18:15:51] [Rank 0] Group 14 Loss: 4.9568
+[2025-09-05 18:15:51] [Rank 0] Group 15 Loss: 4.9628
+[2025-09-05 18:15:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:15:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:15:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:15:51] [Rank 0] Group 3 FTA: 0.4400
+[2025-09-05 18:15:51] [Rank 0] Group 4 FTA: 0.3200
+[2025-09-05 18:15:51] [Rank 0] Group 5 FTA: 0.3400
+[2025-09-05 18:15:51] [Rank 0] Group 6 FTA: 0.3200
+[2025-09-05 18:15:51] [Rank 0] Group 7 FTA: 0.2000
+[2025-09-05 18:15:51] [Rank 0] Group 8 FTA: 0.2700
+[2025-09-05 18:15:51] [Rank 0] Group 9 FTA: 0.2000
+[2025-09-05 18:15:51] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 18:15:51] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 18:15:51] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 18:15:51] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 18:15:51] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 18:15:51] [Rank 0] Group 15 FTA: 0.1600
+[2025-09-05 18:15:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:15:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:15:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:15:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:15:53] [Rank 0] step:3501/10000 train_time:163625ms step_avg:46.74ms
+[2025-09-05 18:15:53] [Rank 0] step:3521/10000 train_time:164289ms step_avg:46.66ms
+[2025-09-05 18:15:54] [Rank 0] step:3541/10000 train_time:165025ms step_avg:46.60ms
+[2025-09-05 18:15:55] [Rank 0] step:3561/10000 train_time:165762ms step_avg:46.55ms
+[2025-09-05 18:15:56] [Rank 0] step:3581/10000 train_time:166499ms step_avg:46.50ms
+[2025-09-05 18:15:56] [Rank 0] step:3601/10000 train_time:167236ms step_avg:46.44ms
+[2025-09-05 18:15:57] [Rank 0] step:3621/10000 train_time:167973ms step_avg:46.39ms
+[2025-09-05 18:15:58] [Rank 0] step:3641/10000 train_time:168903ms step_avg:46.39ms
+[2025-09-05 18:15:59] [Rank 0] step:3661/10000 train_time:169640ms step_avg:46.34ms
+[2025-09-05 18:16:00] [Rank 0] step:3681/10000 train_time:170376ms step_avg:46.29ms
+[2025-09-05 18:16:00] [Rank 0] step:3701/10000 train_time:171112ms step_avg:46.23ms
+[2025-09-05 18:16:01] [Rank 0] step:3721/10000 train_time:171848ms step_avg:46.18ms
+[2025-09-05 18:16:02] [Rank 0] step:3741/10000 train_time:172586ms step_avg:46.13ms
+[2025-09-05 18:16:02] [Rank 0] step:3761/10000 train_time:173326ms step_avg:46.09ms
+[2025-09-05 18:16:03] [Rank 0] step:3781/10000 train_time:174063ms step_avg:46.04ms
+[2025-09-05 18:16:04] [Rank 0] step:3801/10000 train_time:174799ms step_avg:45.99ms
+[2025-09-05 18:16:05] [Rank 0] step:3821/10000 train_time:175537ms step_avg:45.94ms
+[2025-09-05 18:16:05] [Rank 0] step:3841/10000 train_time:176274ms step_avg:45.89ms
+[2025-09-05 18:16:06] [Rank 0] step:3861/10000 train_time:177011ms step_avg:45.85ms
+[2025-09-05 18:16:07] [Rank 0] step:3881/10000 train_time:177747ms step_avg:45.80ms
+[2025-09-05 18:16:08] [Rank 0] step:3901/10000 train_time:178483ms step_avg:45.75ms
+[2025-09-05 18:16:08] [Rank 0] step:3921/10000 train_time:179219ms step_avg:45.71ms
+[2025-09-05 18:16:09] [Rank 0] step:3941/10000 train_time:179956ms step_avg:45.66ms
+[2025-09-05 18:16:10] [Rank 0] step:3961/10000 train_time:180692ms step_avg:45.62ms
+[2025-09-05 18:16:11] [Rank 0] step:3981/10000 train_time:181429ms step_avg:45.57ms
+[2025-09-05 18:16:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
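step_avg in these lines is simply cumulative train_time divided by the step index, e.g. 163615 ms / 3500 ≈ 46.75 ms at the step-3500 validation above; note also that train_time advances only ~10 ms across a detailed evaluation (162798 → 163625 ms around it), so the evaluation passes are excluded from it. A one-line check of the arithmetic:

```python
# step_avg as reported in the log: cumulative training time over steps done.
# The two checks reproduce the step-3500 line above and the step-4000 line
# that follows in the next block.
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

assert f"{step_avg_ms(163615, 3500):.2f}" == "46.75"
assert f"{step_avg_ms(182246, 4000):.2f}" == "45.56"
```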
+[2025-09-05 18:16:12] [Rank 0] PRINT: step:4000/10000 train_loss:1.8272 val_loss:1.7946 train_time:182246ms step_avg:45.56ms
+[2025-09-05 18:16:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:16:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:17:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:17:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:17:33] [Rank 0] Total Loss: 4.2737
+[2025-09-05 18:17:33] [Rank 0] Total FTA (Unweighted): 0.3838
+[2025-09-05 18:17:33] [Rank 0] Total FTA (Weighted): 0.3837
+[2025-09-05 18:17:33] [Rank 0] Group 0 Loss: 3.2551
+[2025-09-05 18:17:33] [Rank 0] Group 1 Loss: 3.0448
+[2025-09-05 18:17:33] [Rank 0] Group 2 Loss: 3.1611
+[2025-09-05 18:17:33] [Rank 0] Group 3 Loss: 3.3863
+[2025-09-05 18:17:33] [Rank 0] Group 4 Loss: 3.6192
+[2025-09-05 18:17:33] [Rank 0] Group 5 Loss: 3.9529
+[2025-09-05 18:17:33] [Rank 0] Group 6 Loss: 4.2716
+[2025-09-05 18:17:33] [Rank 0] Group 7 Loss: 4.3545
+[2025-09-05 18:17:33] [Rank 0] Group 8 Loss: 4.7073
+[2025-09-05 18:17:33] [Rank 0] Group 9 Loss: 4.8400
+[2025-09-05 18:17:33] [Rank 0] Group 10 Loss: 4.9788
+[2025-09-05 18:17:33] [Rank 0] Group 11 Loss: 4.9907
+[2025-09-05 18:17:33] [Rank 0] Group 12 Loss: 4.9041
+[2025-09-05 18:17:33] [Rank 0] Group 13 Loss: 4.9739
+[2025-09-05 18:17:33] [Rank 0] Group 14 Loss: 4.9910
+[2025-09-05 18:17:33] [Rank 0] Group 15 Loss: 4.9477
+[2025-09-05 18:17:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:17:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:17:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:17:33] [Rank 0] Group 3 FTA: 0.4400
+[2025-09-05 18:17:33] [Rank 0] Group 4 FTA: 0.3600
+[2025-09-05 18:17:33] [Rank 0] Group 5 FTA: 0.3600
+[2025-09-05 18:17:33] [Rank 0] Group 6 FTA: 0.3700
+[2025-09-05 18:17:33] [Rank 0] Group 7 FTA: 0.2300
+[2025-09-05 18:17:33] [Rank 0] Group 8 FTA: 0.2500
+[2025-09-05 18:17:33] [Rank 0] Group 9 FTA: 0.2000
+[2025-09-05 18:17:33] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 18:17:33] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 18:17:33] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 18:17:33] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 18:17:33] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 18:17:33] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 18:17:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:17:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:17:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:17:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:17:35] [Rank 0] step:4001/10000 train_time:182256ms step_avg:45.55ms
+[2025-09-05 18:17:36] [Rank 0] step:4021/10000 train_time:183525ms step_avg:45.64ms
+[2025-09-05 18:17:37] [Rank 0] step:4041/10000 train_time:184262ms step_avg:45.60ms
+[2025-09-05 18:17:38] [Rank 0] step:4061/10000 train_time:184998ms step_avg:45.55ms
+[2025-09-05 18:17:38] [Rank 0] step:4081/10000 train_time:185735ms step_avg:45.51ms
+[2025-09-05 18:17:39] [Rank 0] step:4101/10000 train_time:186472ms step_avg:45.47ms
+[2025-09-05 18:17:40] [Rank 0] step:4121/10000 train_time:187209ms step_avg:45.43ms
+[2025-09-05 18:17:41] [Rank 0] step:4141/10000 train_time:187945ms step_avg:45.39ms
+[2025-09-05 18:17:41] [Rank 0] step:4161/10000 train_time:188681ms step_avg:45.35ms
+[2025-09-05 18:17:42] [Rank 0] step:4181/10000 train_time:189418ms step_avg:45.30ms
+[2025-09-05 18:17:43] [Rank 0] step:4201/10000 train_time:190154ms step_avg:45.26ms
+[2025-09-05 18:17:44] [Rank 0] step:4221/10000 train_time:190890ms step_avg:45.22ms
+[2025-09-05 18:17:44] [Rank 0] step:4241/10000 train_time:191627ms step_avg:45.18ms
+[2025-09-05 18:17:45] [Rank 0] step:4261/10000 train_time:192363ms step_avg:45.15ms
+[2025-09-05 18:17:46] [Rank 0] step:4281/10000 train_time:193100ms step_avg:45.11ms
+[2025-09-05 18:17:46] [Rank 0] step:4301/10000 train_time:193837ms step_avg:45.07ms
+[2025-09-05 18:17:47] [Rank 0] step:4321/10000 train_time:194574ms step_avg:45.03ms
+[2025-09-05 18:17:48] [Rank 0] step:4341/10000 train_time:195310ms step_avg:44.99ms
+[2025-09-05 18:17:49] [Rank 0] step:4361/10000 train_time:196046ms step_avg:44.95ms
+[2025-09-05 18:17:49] [Rank 0] step:4381/10000 train_time:196783ms step_avg:44.92ms
+[2025-09-05 18:17:50] [Rank 0] step:4401/10000 train_time:197520ms step_avg:44.88ms
+[2025-09-05 18:17:51] [Rank 0] step:4421/10000 train_time:198256ms step_avg:44.84ms
+[2025-09-05 18:17:52] [Rank 0] step:4441/10000 train_time:198993ms step_avg:44.81ms
+[2025-09-05 18:17:52] [Rank 0] step:4461/10000 train_time:199729ms step_avg:44.77ms
+[2025-09-05 18:17:53] [Rank 0] step:4481/10000 train_time:200466ms step_avg:44.74ms
+[2025-09-05 18:17:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:17:54] [Rank 0] PRINT: step:4500/10000 train_loss:1.7877 val_loss:1.7636 train_time:201283ms step_avg:44.73ms
+[2025-09-05 18:17:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:17:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:19:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:19:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:19:15] [Rank 0] Total Loss: 4.2172
+[2025-09-05 18:19:15] [Rank 0] Total FTA (Unweighted): 0.3888
+[2025-09-05 18:19:15] [Rank 0] Total FTA (Weighted): 0.3887
+[2025-09-05 18:19:15] [Rank 0] Group 0 Loss: 3.2578
+[2025-09-05 18:19:15] [Rank 0] Group 1 Loss: 3.0001
+[2025-09-05 18:19:15] [Rank 0] Group 2 Loss: 3.0694
+[2025-09-05 18:19:15] [Rank 0] Group 3 Loss: 3.3662
+[2025-09-05 18:19:15] [Rank 0] Group 4 Loss: 3.5661
+[2025-09-05 18:19:15] [Rank 0] Group 5 Loss: 3.8393
+[2025-09-05 18:19:15] [Rank 0] Group 6 Loss: 4.1605
+[2025-09-05 18:19:15] [Rank 0] Group 7 Loss: 4.3087
+[2025-09-05 18:19:15] [Rank 0] Group 8 Loss: 4.6812
+[2025-09-05 18:19:15] [Rank 0] Group 9 Loss: 4.8124
+[2025-09-05 18:19:15] [Rank 0] Group 10 Loss: 4.8792
+[2025-09-05 18:19:15] [Rank 0] Group 11 Loss: 4.9053
+[2025-09-05 18:19:15] [Rank 0] Group 12 Loss: 4.8371
+[2025-09-05 18:19:15] [Rank 0] Group 13 Loss: 4.9266
+[2025-09-05 18:19:15] [Rank 0] Group 14 Loss: 4.9560
+[2025-09-05 18:19:15] [Rank 0] Group 15 Loss: 4.9096
+[2025-09-05 18:19:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:19:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:19:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:19:15] [Rank 0] Group 3 FTA: 0.4400
+[2025-09-05 18:19:16] [Rank 0] Group 4 FTA: 0.3300
+[2025-09-05 18:19:16] [Rank 0] Group 5 FTA: 0.3700
+[2025-09-05 18:19:16] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 18:19:16] [Rank 0] Group 7 FTA: 0.2600
+[2025-09-05 18:19:16] [Rank 0] Group 8 FTA: 0.3000
+[2025-09-05 18:19:16] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 18:19:16] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-05 18:19:16] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 18:19:16] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 18:19:16] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 18:19:16] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 18:19:16] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 18:19:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:19:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:19:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:19:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:19:17] [Rank 0] step:4501/10000 train_time:201292ms step_avg:44.72ms
+[2025-09-05 18:19:18] [Rank 0] step:4521/10000 train_time:201964ms step_avg:44.67ms
+[2025-09-05 18:19:19] [Rank 0] step:4541/10000 train_time:202700ms step_avg:44.64ms
+[2025-09-05 18:19:19] [Rank 0] step:4561/10000 train_time:203437ms step_avg:44.60ms
+[2025-09-05 18:19:20] [Rank 0] step:4581/10000 train_time:204174ms step_avg:44.57ms
+[2025-09-05 18:19:21] [Rank 0] step:4601/10000 train_time:204910ms step_avg:44.54ms
+[2025-09-05 18:19:21] [Rank 0] step:4621/10000 train_time:205646ms step_avg:44.50ms
+[2025-09-05 18:19:22] [Rank 0] step:4641/10000 train_time:206383ms step_avg:44.47ms
+[2025-09-05 18:19:23] [Rank 0] step:4661/10000 train_time:207119ms step_avg:44.44ms
+[2025-09-05 18:19:24] [Rank 0] step:4681/10000 train_time:207855ms step_avg:44.40ms
+[2025-09-05 18:19:24] [Rank 0] step:4701/10000 train_time:208591ms step_avg:44.37ms
+[2025-09-05 18:19:25] [Rank 0] step:4721/10000 train_time:209328ms step_avg:44.34ms
+[2025-09-05 18:19:26] [Rank 0] step:4741/10000 train_time:210064ms step_avg:44.31ms
+[2025-09-05 18:19:27] [Rank 0] step:4761/10000 train_time:210801ms step_avg:44.28ms
+[2025-09-05 18:19:27] [Rank 0] step:4781/10000 train_time:211538ms step_avg:44.25ms
+[2025-09-05 18:19:28] [Rank 0] step:4801/10000 train_time:212391ms step_avg:44.24ms
+[2025-09-05 18:19:29] [Rank 0] step:4821/10000 train_time:213126ms step_avg:44.21ms
+[2025-09-05 18:19:30] [Rank 0] step:4841/10000 train_time:214178ms step_avg:44.24ms
+[2025-09-05 18:19:31] [Rank 0] step:4861/10000 train_time:215050ms step_avg:44.24ms
+[2025-09-05 18:19:32] [Rank 0] step:4881/10000 train_time:215786ms step_avg:44.21ms
+[2025-09-05 18:19:32] [Rank 0] step:4901/10000 train_time:216522ms step_avg:44.18ms
+[2025-09-05 18:19:33] [Rank 0] step:4921/10000 train_time:217257ms step_avg:44.15ms
+[2025-09-05 18:19:34] [Rank 0] step:4941/10000 train_time:217994ms step_avg:44.12ms
+[2025-09-05 18:19:35] [Rank 0] step:4961/10000 train_time:218730ms step_avg:44.09ms
+[2025-09-05 18:19:35] [Rank 0] step:4981/10000 train_time:219466ms step_avg:44.06ms
+[2025-09-05 18:19:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
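Worth noting from the timestamps: each detailed evaluation costs about 81 s of wall clock (18:17:54 → 18:19:15 around step 4500 above), while train_time grows by only ~19 s per 500-step interval (182256 → 201292 ms between steps 4001 and 4501), so at this cadence the fixed-eval passes dominate the run's wall-clock time. A quick arithmetic check:

```python
# Wall-clock cost of the 500-step detailed-evaluation cadence, computed from
# the timestamps and train_time counters logged around steps 4000-4500 above.
eval_seconds = (18 * 3600 + 19 * 60 + 15) - (18 * 3600 + 17 * 60 + 54)
train_seconds = (201292 - 182256) / 1000  # train_time, step 4001 -> 4501
print(eval_seconds, train_seconds)  # 81 vs ~19.0: evaluation dominates
```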
+[2025-09-05 18:19:36] [Rank 0] PRINT: step:5000/10000 train_loss:1.7563 val_loss:1.7367 train_time:220283ms step_avg:44.06ms
+[2025-09-05 18:19:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:19:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:20:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:20:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:20:58] [Rank 0] Total Loss: 4.1372
+[2025-09-05 18:20:58] [Rank 0] Total FTA (Unweighted): 0.4025
+[2025-09-05 18:20:58] [Rank 0] Total FTA (Weighted): 0.4025
+[2025-09-05 18:20:58] [Rank 0] Group 0 Loss: 3.1502
+[2025-09-05 18:20:58] [Rank 0] Group 1 Loss: 2.9615
+[2025-09-05 18:20:58] [Rank 0] Group 2 Loss: 3.0017
+[2025-09-05 18:20:58] [Rank 0] Group 3 Loss: 3.2833
+[2025-09-05 18:20:58] [Rank 0] Group 4 Loss: 3.5003
+[2025-09-05 18:20:58] [Rank 0] Group 5 Loss: 3.7999
+[2025-09-05 18:20:58] [Rank 0] Group 6 Loss: 4.0372
+[2025-09-05 18:20:58] [Rank 0] Group 7 Loss: 4.2521
+[2025-09-05 18:20:58] [Rank 0] Group 8 Loss: 4.5723
+[2025-09-05 18:20:58] [Rank 0] Group 9 Loss: 4.7141
+[2025-09-05 18:20:58] [Rank 0] Group 10 Loss: 4.7959
+[2025-09-05 18:20:58] [Rank 0] Group 11 Loss: 4.8212
+[2025-09-05 18:20:58] [Rank 0] Group 12 Loss: 4.7749
+[2025-09-05 18:20:58] [Rank 0] Group 13 Loss: 4.8301
+[2025-09-05 18:20:58] [Rank 0] Group 14 Loss: 4.8781
+[2025-09-05 18:20:58] [Rank 0] Group 15 Loss: 4.8221
+[2025-09-05 18:20:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:20:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:20:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:20:58] [Rank 0] Group 3 FTA: 0.4500
+[2025-09-05 18:20:58] [Rank 0] Group 4 FTA: 0.3900
+[2025-09-05 18:20:58] [Rank 0] Group 5 FTA: 0.4200
+[2025-09-05 18:20:58] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 18:20:58] [Rank 0] Group 7 FTA: 0.2900
+[2025-09-05 18:20:58] [Rank 0] Group 8 FTA: 0.3100
+[2025-09-05 18:20:58] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 18:20:58] [Rank 0] Group 10 FTA: 0.2500
+[2025-09-05 18:20:58] [Rank 0] Group 11 FTA: 0.2500
+[2025-09-05 18:20:58] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 18:20:58] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 18:20:58] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 18:20:58] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 18:20:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:20:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:20:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:20:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:21:00] [Rank 0] step:5001/10000 train_time:220293ms step_avg:44.05ms
+[2025-09-05 18:21:00] [Rank 0] step:5021/10000 train_time:220963ms step_avg:44.01ms
+[2025-09-05 18:21:01] [Rank 0] step:5041/10000 train_time:221700ms step_avg:43.98ms
+[2025-09-05 18:21:02] [Rank 0] step:5061/10000 train_time:222437ms step_avg:43.95ms
+[2025-09-05 18:21:03] [Rank 0] step:5081/10000 train_time:223174ms step_avg:43.92ms
+[2025-09-05 18:21:03] [Rank 0] step:5101/10000 train_time:223910ms step_avg:43.90ms
+[2025-09-05 18:21:04] [Rank 0] step:5121/10000 train_time:224646ms step_avg:43.87ms
+[2025-09-05 18:21:05] [Rank 0] step:5141/10000 train_time:225383ms step_avg:43.84ms
+[2025-09-05 18:21:05] [Rank 0] step:5161/10000 train_time:226120ms step_avg:43.81ms
+[2025-09-05 18:21:06] [Rank 0] step:5181/10000 train_time:226857ms step_avg:43.79ms
+[2025-09-05 18:21:07] [Rank 0] step:5201/10000 train_time:227595ms step_avg:43.76ms
+[2025-09-05 18:21:08] [Rank 0] step:5221/10000 train_time:228332ms step_avg:43.73ms
+[2025-09-05 18:21:08] [Rank 0] step:5241/10000 train_time:229068ms step_avg:43.71ms
+[2025-09-05 18:21:09] [Rank 0] step:5261/10000 train_time:229805ms step_avg:43.68ms
+[2025-09-05 18:21:10] [Rank 0] step:5281/10000 train_time:230541ms step_avg:43.65ms
+[2025-09-05 18:21:11] [Rank 0] step:5301/10000 train_time:231277ms step_avg:43.63ms
+[2025-09-05 18:21:11] [Rank 0] step:5321/10000 train_time:232013ms step_avg:43.60ms
+[2025-09-05 18:21:12] [Rank 0] step:5341/10000 train_time:232750ms step_avg:43.58ms
+[2025-09-05 18:21:13] [Rank 0] step:5361/10000 train_time:233487ms step_avg:43.55ms
+[2025-09-05 18:21:14] [Rank 0] step:5381/10000 train_time:234223ms step_avg:43.53ms
+[2025-09-05 18:21:14] [Rank 0] step:5401/10000 train_time:234960ms step_avg:43.50ms
+[2025-09-05 18:21:15] [Rank 0] step:5421/10000 train_time:235696ms step_avg:43.48ms
+[2025-09-05 18:21:16] [Rank 0] step:5441/10000 train_time:236433ms step_avg:43.45ms
+[2025-09-05 18:21:17] [Rank 0] step:5461/10000 train_time:237170ms step_avg:43.43ms
+[2025-09-05 18:21:17] [Rank 0] step:5481/10000 train_time:237906ms step_avg:43.41ms
+[2025-09-05 18:21:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:21:18] [Rank 0] PRINT: step:5500/10000 train_loss:1.7332 val_loss:1.7162 train_time:238724ms step_avg:43.40ms
+[2025-09-05 18:21:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:21:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:22:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:22:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:22:40] [Rank 0] Total Loss: 4.1456
+[2025-09-05 18:22:40] [Rank 0] Total FTA (Unweighted): 0.4256
+[2025-09-05 18:22:40] [Rank 0] Total FTA (Weighted): 0.4256
+[2025-09-05 18:22:40] [Rank 0] Group 0 Loss: 3.2186
+[2025-09-05 18:22:40] [Rank 0] Group 1 Loss: 3.0306
+[2025-09-05 18:22:40] [Rank 0] Group 2 Loss: 3.0190
+[2025-09-05 18:22:40] [Rank 0] Group 3 Loss: 3.2735
+[2025-09-05 18:22:40] [Rank 0] Group 4 Loss: 3.5153
+[2025-09-05 18:22:40] [Rank 0] Group 5 Loss: 3.7959
+[2025-09-05 18:22:40] [Rank 0] Group 6 Loss: 4.0531
+[2025-09-05 18:22:40] [Rank 0] Group 7 Loss: 4.2644
+[2025-09-05 18:22:40] [Rank 0] Group 8 Loss: 4.5632
+[2025-09-05 18:22:40] [Rank 0] Group 9 Loss: 4.6907
+[2025-09-05 18:22:40] [Rank 0] Group 10 Loss: 4.7887
+[2025-09-05 18:22:40] [Rank 0] Group 11 Loss: 4.8048
+[2025-09-05 18:22:40] [Rank 0] Group 12 Loss: 4.7873
+[2025-09-05 18:22:40] [Rank 0] Group 13 Loss: 4.8417
+[2025-09-05 18:22:40] [Rank 0] Group 14 Loss: 4.8628
+[2025-09-05 18:22:40] [Rank 0] Group 15 Loss: 4.8193
+[2025-09-05 18:22:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:22:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:22:40] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:22:40] [Rank 0] Group 3 FTA: 0.6500
+[2025-09-05 18:22:40] [Rank 0] Group 4 FTA: 0.4100
+[2025-09-05 18:22:40] [Rank 0] Group 5 FTA: 0.4400
+[2025-09-05 18:22:40] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 18:22:40] [Rank 0] Group 7 FTA: 0.2800
+[2025-09-05 18:22:40] [Rank 0] Group 8 FTA: 0.3400
+[2025-09-05 18:22:40] [Rank 0] Group 9 FTA: 0.2600
+[2025-09-05 18:22:40] [Rank 0] Group 10 FTA: 0.2600
+[2025-09-05 18:22:40] [Rank 0] Group 11 FTA: 0.2400
+[2025-09-05 18:22:40] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 18:22:40] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 18:22:40] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 18:22:40] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 18:22:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:22:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:22:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:22:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:22:41] [Rank 0] step:5501/10000 train_time:238733ms step_avg:43.40ms
+[2025-09-05 18:22:42] [Rank 0] step:5521/10000 train_time:239396ms step_avg:43.36ms
+[2025-09-05 18:22:43] [Rank 0] step:5541/10000 train_time:240132ms step_avg:43.34ms
+[2025-09-05 18:22:43] [Rank 0] step:5561/10000 train_time:240869ms step_avg:43.31ms
+[2025-09-05 18:22:44] [Rank 0] step:5581/10000 train_time:241605ms step_avg:43.29ms
+[2025-09-05 18:22:45] [Rank 0] step:5601/10000 train_time:242341ms step_avg:43.27ms
+[2025-09-05 18:22:46] [Rank 0] step:5621/10000 train_time:243078ms step_avg:43.24ms
+[2025-09-05 18:22:47] [Rank 0] step:5641/10000 train_time:244436ms step_avg:43.33ms
+[2025-09-05 18:22:48] [Rank 0] step:5661/10000 train_time:245172ms step_avg:43.31ms
+[2025-09-05 18:22:48] [Rank 0] step:5681/10000 train_time:245909ms step_avg:43.29ms
+[2025-09-05 18:22:49] [Rank 0] step:5701/10000 train_time:246645ms step_avg:43.26ms
+[2025-09-05 18:22:50] [Rank 0] step:5721/10000 train_time:247382ms step_avg:43.24ms
+[2025-09-05 18:22:51] [Rank 0] step:5741/10000 train_time:248119ms step_avg:43.22ms
+[2025-09-05 18:22:51] [Rank 0] step:5761/10000 train_time:248856ms step_avg:43.20ms
+[2025-09-05 18:22:52] [Rank 0] step:5781/10000 train_time:249593ms step_avg:43.17ms
+[2025-09-05 18:22:53] [Rank 0] step:5801/10000 train_time:250329ms step_avg:43.15ms
+[2025-09-05 18:22:54] [Rank 0] step:5821/10000 train_time:251067ms step_avg:43.13ms
+[2025-09-05 18:22:54] [Rank 0] step:5841/10000 train_time:251803ms step_avg:43.11ms
+[2025-09-05 18:22:55] [Rank 0] step:5861/10000 train_time:252540ms step_avg:43.09ms
+[2025-09-05 18:22:56] [Rank 0] step:5881/10000 train_time:253277ms step_avg:43.07ms
+[2025-09-05 18:22:56] [Rank 0] step:5901/10000 train_time:254014ms step_avg:43.05ms
+[2025-09-05 18:22:57] [Rank 0] step:5921/10000 train_time:254750ms step_avg:43.02ms
+[2025-09-05 18:22:58] [Rank 0] step:5941/10000 train_time:255487ms step_avg:43.00ms
+[2025-09-05 18:22:59] [Rank 0] step:5961/10000 train_time:256224ms step_avg:42.98ms
+[2025-09-05 18:22:59] [Rank 0] step:5981/10000 train_time:256961ms step_avg:42.96ms
+[2025-09-05 18:23:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:23:01] [Rank 0] PRINT: step:6000/10000 train_loss:1.7147 val_loss:1.6987 train_time:257778ms step_avg:42.96ms
+[2025-09-05 18:23:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:23:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:24:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:24:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:24:22] [Rank 0] Total Loss: 4.0994
+[2025-09-05 18:24:22] [Rank 0] Total FTA (Unweighted): 0.4406
+[2025-09-05 18:24:22] [Rank 0] Total FTA (Weighted): 0.4406
+[2025-09-05 18:24:22] [Rank 0] Group 0 Loss: 3.1840
+[2025-09-05 18:24:22] [Rank 0] Group 1 Loss: 2.9561
+[2025-09-05 18:24:22] [Rank 0] Group 2 Loss: 3.0263
+[2025-09-05 18:24:22] [Rank 0] Group 3 Loss: 3.2734
+[2025-09-05 18:24:22] [Rank 0] Group 4 Loss: 3.4713
+[2025-09-05 18:24:22] [Rank 0] Group 5 Loss: 3.7565
+[2025-09-05 18:24:22] [Rank 0] Group 6 Loss: 3.9920
+[2025-09-05 18:24:22] [Rank 0] Group 7 Loss: 4.1952
+[2025-09-05 18:24:22] [Rank 0] Group 8 Loss: 4.5173
+[2025-09-05 18:24:22] [Rank 0] Group 9 Loss: 4.6403
+[2025-09-05 18:24:22] [Rank 0] Group 10 Loss: 4.7541
+[2025-09-05 18:24:22] [Rank 0] Group 11 Loss: 4.7463
+[2025-09-05 18:24:22] [Rank 0] Group 12 Loss: 4.7409
+[2025-09-05 18:24:22] [Rank 0] Group 13 Loss: 4.7886
+[2025-09-05 18:24:22] [Rank 0] Group 14 Loss: 4.7800
+[2025-09-05 18:24:22] [Rank 0] Group 15 Loss: 4.7673
+[2025-09-05 18:24:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:24:22] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:24:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:24:22] [Rank 0] Group 3 FTA: 0.6800
+[2025-09-05 18:24:22] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 18:24:22] [Rank 0] Group 5 FTA: 0.4600
+[2025-09-05 18:24:22] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 18:24:22] [Rank 0] Group 7 FTA: 0.3100
+[2025-09-05 18:24:22] [Rank 0] Group 8 FTA: 0.3300
+[2025-09-05 18:24:22] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 18:24:22] [Rank 0] Group 10 FTA: 0.2800
+[2025-09-05 18:24:22] [Rank 0] Group 11 FTA: 0.2600
+[2025-09-05 18:24:22] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 18:24:22] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 18:24:22] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 18:24:22] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 18:24:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:24:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:24:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:24:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:24:23] [Rank 0] step:6001/10000 train_time:257788ms step_avg:42.96ms
+[2025-09-05 18:24:25] [Rank 0] step:6021/10000 train_time:259063ms step_avg:43.03ms
+[2025-09-05 18:24:25] [Rank 0] step:6041/10000 train_time:259800ms step_avg:43.01ms
+[2025-09-05 18:24:26] [Rank 0] step:6061/10000 train_time:260536ms step_avg:42.99ms
+[2025-09-05 18:24:27] [Rank 0] step:6081/10000 train_time:261272ms step_avg:42.97ms
+[2025-09-05 18:24:28] [Rank 0] step:6101/10000 train_time:262009ms step_avg:42.95ms
+[2025-09-05 18:24:28] [Rank 0] step:6121/10000 train_time:262746ms step_avg:42.93ms
+[2025-09-05 18:24:29] [Rank 0] step:6141/10000 train_time:263483ms step_avg:42.91ms
+[2025-09-05 18:24:30] [Rank 0] step:6161/10000 train_time:264220ms step_avg:42.89ms
+[2025-09-05 18:24:31] [Rank 0] step:6181/10000 train_time:264957ms step_avg:42.87ms
+[2025-09-05 18:24:31] [Rank 0] step:6201/10000 train_time:265694ms step_avg:42.85ms
+[2025-09-05 18:24:32] [Rank 0] step:6221/10000 train_time:266431ms step_avg:42.83ms
+[2025-09-05 18:24:33] [Rank 0] step:6241/10000 train_time:267168ms step_avg:42.81ms
+[2025-09-05 18:24:34] [Rank 0] step:6261/10000 train_time:267904ms step_avg:42.79ms
+[2025-09-05 18:24:34] [Rank 0] step:6281/10000 train_time:268641ms step_avg:42.77ms
+[2025-09-05 18:24:35] [Rank 0] step:6301/10000 train_time:269378ms step_avg:42.75ms
+[2025-09-05 18:24:36] [Rank 0] step:6321/10000 train_time:270114ms step_avg:42.73ms
+[2025-09-05 18:24:36] [Rank 0] step:6341/10000 train_time:270852ms step_avg:42.71ms
+[2025-09-05 18:24:37] [Rank 0] step:6361/10000 train_time:271589ms step_avg:42.70ms
+[2025-09-05 18:24:38] [Rank 0] step:6381/10000 train_time:272326ms step_avg:42.68ms
+[2025-09-05 18:24:39] [Rank 0] step:6401/10000 train_time:273063ms step_avg:42.66ms
+[2025-09-05 18:24:39] [Rank 0] step:6421/10000 train_time:273800ms step_avg:42.64ms
+[2025-09-05 18:24:40] [Rank 0] step:6441/10000 train_time:274537ms step_avg:42.62ms
+[2025-09-05 18:24:41] [Rank 0] step:6461/10000 train_time:275273ms step_avg:42.61ms
+[2025-09-05 18:24:42] [Rank 0] step:6481/10000 train_time:276010ms step_avg:42.59ms
+[2025-09-05 18:24:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
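In these reports the unweighted total is the arithmetic mean of the 16 per-group FTAs, and since the 1600-sample fixed-eval set appears to hold 100 samples per group, the weighted total coincides with it. A quick check against the step-6000 block above (a sketch under that equal-group-size assumption, not the run's own evaluation code):

```python
# Per-group FTA values copied from the step-6000 report.
group_fta = [1.00, 1.00, 1.00, 0.68, 0.44, 0.46, 0.38, 0.31,
             0.33, 0.25, 0.28, 0.26, 0.18, 0.21, 0.15, 0.12]

unweighted = sum(group_fta) / len(group_fta)
# With 100 samples in every group, the sample-weighted mean is identical.
weighted = sum(f * 100 for f in group_fta) / (100 * len(group_fta))

print(f"{unweighted:.4f} {weighted:.4f}")  # -> 0.4406 0.4406, matching the log
```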
+[2025-09-05 18:24:43] [Rank 0] PRINT: step:6500/10000 train_loss:1.7004 val_loss:1.6867 train_time:276827ms step_avg:42.59ms
+[2025-09-05 18:24:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:24:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:26:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:26:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:26:05] [Rank 0] Total Loss: 4.1015
+[2025-09-05 18:26:05] [Rank 0] Total FTA (Unweighted): 0.4419
+[2025-09-05 18:26:05] [Rank 0] Total FTA (Weighted): 0.4419
+[2025-09-05 18:26:05] [Rank 0] Group 0 Loss: 3.1932
+[2025-09-05 18:26:05] [Rank 0] Group 1 Loss: 2.9797
+[2025-09-05 18:26:05] [Rank 0] Group 2 Loss: 3.0130
+[2025-09-05 18:26:05] [Rank 0] Group 3 Loss: 3.2845
+[2025-09-05 18:26:05] [Rank 0] Group 4 Loss: 3.4917
+[2025-09-05 18:26:05] [Rank 0] Group 5 Loss: 3.7603
+[2025-09-05 18:26:05] [Rank 0] Group 6 Loss: 3.9665
+[2025-09-05 18:26:05] [Rank 0] Group 7 Loss: 4.1989
+[2025-09-05 18:26:05] [Rank 0] Group 8 Loss: 4.5260
+[2025-09-05 18:26:05] [Rank 0] Group 9 Loss: 4.6354
+[2025-09-05 18:26:05] [Rank 0] Group 10 Loss: 4.7596
+[2025-09-05 18:26:05] [Rank 0] Group 11 Loss: 4.7613
+[2025-09-05 18:26:05] [Rank 0] Group 12 Loss: 4.7120
+[2025-09-05 18:26:05] [Rank 0] Group 13 Loss: 4.7713
+[2025-09-05 18:26:05] [Rank 0] Group 14 Loss: 4.8025
+[2025-09-05 18:26:05] [Rank 0] Group 15 Loss: 4.7685
+[2025-09-05 18:26:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:26:05] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:26:05] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:26:05] [Rank 0] Group 3 FTA: 0.6800
+[2025-09-05 18:26:05] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 18:26:05] [Rank 0] Group 5 FTA: 0.4400
+[2025-09-05 18:26:05] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 18:26:05] [Rank 0] Group 7 FTA: 0.3100
+[2025-09-05 18:26:05] [Rank 0] Group 8 FTA: 0.3300
+[2025-09-05 18:26:05] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 18:26:05] [Rank 0] Group 10 FTA: 0.3200
+[2025-09-05 18:26:05] [Rank 0] Group 11 FTA: 0.2700
+[2025-09-05 18:26:05] [Rank 0] Group 12 FTA: 0.2100
+[2025-09-05 18:26:05] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 18:26:05] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 18:26:05] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 18:26:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:26:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:26:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:26:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:26:06] [Rank 0] step:6501/10000 train_time:276836ms step_avg:42.58ms
+[2025-09-05 18:26:07] [Rank 0] step:6521/10000 train_time:277519ms step_avg:42.56ms
+[2025-09-05 18:26:08] [Rank 0] step:6541/10000 train_time:278256ms step_avg:42.54ms
+[2025-09-05 18:26:08] [Rank 0] step:6561/10000 train_time:278992ms step_avg:42.52ms
+[2025-09-05 18:26:09] [Rank 0] step:6581/10000 train_time:279729ms step_avg:42.51ms
+[2025-09-05 18:26:10] [Rank 0] step:6601/10000 train_time:280465ms step_avg:42.49ms
+[2025-09-05 18:26:11] [Rank 0] step:6621/10000 train_time:281201ms step_avg:42.47ms
+[2025-09-05 18:26:11] [Rank 0] step:6641/10000 train_time:281939ms step_avg:42.45ms
+[2025-09-05 18:26:12] [Rank 0] step:6661/10000 train_time:282675ms step_avg:42.44ms
+[2025-09-05 18:26:13] [Rank 0] step:6681/10000 train_time:283412ms step_avg:42.42ms
+[2025-09-05 18:26:14] [Rank 0] step:6701/10000 train_time:284149ms step_avg:42.40ms
+[2025-09-05 18:26:14] [Rank 0] step:6721/10000 train_time:284886ms step_avg:42.39ms
+[2025-09-05 18:26:15] [Rank 0] step:6741/10000 train_time:285622ms step_avg:42.37ms
+[2025-09-05 18:26:16] [Rank 0] step:6761/10000 train_time:286360ms step_avg:42.35ms
+[2025-09-05 18:26:17] [Rank 0] step:6781/10000 train_time:287097ms step_avg:42.34ms
+[2025-09-05 18:26:17] [Rank 0] step:6801/10000 train_time:287833ms step_avg:42.32ms
+[2025-09-05 18:26:18] [Rank 0] step:6821/10000 train_time:288570ms step_avg:42.31ms
+[2025-09-05 18:26:19] [Rank 0] step:6841/10000 train_time:289912ms step_avg:42.38ms
+[2025-09-05 18:26:20] [Rank 0] step:6861/10000 train_time:290649ms step_avg:42.36ms
+[2025-09-05 18:26:21] [Rank 0] step:6881/10000 train_time:291386ms step_avg:42.35ms
+[2025-09-05 18:26:22] [Rank 0] step:6901/10000 train_time:292123ms step_avg:42.33ms
+[2025-09-05 18:26:22] [Rank 0] step:6921/10000 train_time:292860ms step_avg:42.31ms
+[2025-09-05 18:26:23] [Rank 0] step:6941/10000 train_time:293597ms step_avg:42.30ms
+[2025-09-05 18:26:24] [Rank 0] step:6961/10000 train_time:294334ms step_avg:42.28ms
+[2025-09-05 18:26:24] [Rank 0] step:6981/10000 train_time:295070ms step_avg:42.27ms
+[2025-09-05 18:26:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:26:26] [Rank 0] PRINT: step:7000/10000 train_loss:1.6878 val_loss:1.6728 train_time:295888ms step_avg:42.27ms
+[2025-09-05 18:26:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:26:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:27:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:27:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:27:47] [Rank 0] Total Loss: 4.1176
+[2025-09-05 18:27:47] [Rank 0] Total FTA (Unweighted): 0.4562
+[2025-09-05 18:27:47] [Rank 0] Total FTA (Weighted): 0.4562
+[2025-09-05 18:27:47] [Rank 0] Group 0 Loss: 3.1972
+[2025-09-05 18:27:47] [Rank 0] Group 1 Loss: 2.9653
+[2025-09-05 18:27:47] [Rank 0] Group 2 Loss: 3.0316
+[2025-09-05 18:27:47] [Rank 0] Group 3 Loss: 3.3277
+[2025-09-05 18:27:47] [Rank 0] Group 4 Loss: 3.5218
+[2025-09-05 18:27:47] [Rank 0] Group 5 Loss: 3.8053
+[2025-09-05 18:27:47] [Rank 0] Group 6 Loss: 3.9916
+[2025-09-05 18:27:47] [Rank 0] Group 7 Loss: 4.2177
+[2025-09-05 18:27:47] [Rank 0] Group 8 Loss: 4.5382
+[2025-09-05 18:27:47] [Rank 0] Group 9 Loss: 4.6528
+[2025-09-05 18:27:47] [Rank 0] Group 10 Loss: 4.7856
+[2025-09-05 18:27:47] [Rank 0] Group 11 Loss: 4.7663
+[2025-09-05 18:27:47] [Rank 0] Group 12 Loss: 4.7373
+[2025-09-05 18:27:47] [Rank 0] Group 13 Loss: 4.7646
+[2025-09-05 18:27:47] [Rank 0] Group 14 Loss: 4.8172
+[2025-09-05 18:27:47] [Rank 0] Group 15 Loss: 4.7610
+[2025-09-05 18:27:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:27:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:27:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:27:47] [Rank 0] Group 3 FTA: 0.6800
+[2025-09-05 18:27:47] [Rank 0] Group 4 FTA: 0.5000
+[2025-09-05 18:27:47] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 18:27:47] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 18:27:47] [Rank 0] Group 7 FTA: 0.3400
+[2025-09-05 18:27:47] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 18:27:47] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 18:27:47] [Rank 0] Group 10 FTA: 0.3300
+[2025-09-05 18:27:47] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 18:27:47] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 18:27:47] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 18:27:47] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 18:27:47] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 18:27:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:27:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:27:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:27:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:27:49] [Rank 0] step:7001/10000 train_time:295897ms step_avg:42.27ms
+[2025-09-05 18:27:49] [Rank 0] step:7021/10000 train_time:296566ms step_avg:42.24ms
+[2025-09-05 18:27:50] [Rank 0] step:7041/10000 train_time:297302ms step_avg:42.22ms
+[2025-09-05 18:27:51] [Rank 0] step:7061/10000 train_time:298039ms step_avg:42.21ms
+[2025-09-05 18:27:52] [Rank 0] step:7081/10000 train_time:298776ms step_avg:42.19ms
+[2025-09-05 18:27:52] [Rank 0] step:7101/10000 train_time:299512ms step_avg:42.18ms
+[2025-09-05 18:27:53] [Rank 0] step:7121/10000 train_time:300248ms step_avg:42.16ms
+[2025-09-05 18:27:54] [Rank 0] step:7141/10000 train_time:301131ms step_avg:42.17ms
+[2025-09-05 18:27:55] [Rank 0] step:7161/10000 train_time:301868ms step_avg:42.15ms
+[2025-09-05 18:27:55] [Rank 0] step:7181/10000 train_time:302605ms step_avg:42.14ms
+[2025-09-05 18:27:56] [Rank 0] step:7201/10000 train_time:303490ms step_avg:42.15ms
+[2025-09-05 18:27:57] [Rank 0] step:7221/10000 train_time:304295ms step_avg:42.14ms
+[2025-09-05 18:27:58] [Rank 0] step:7241/10000 train_time:305032ms step_avg:42.13ms
+[2025-09-05 18:27:59] [Rank 0] step:7261/10000 train_time:305769ms step_avg:42.11ms
+[2025-09-05 18:27:59] [Rank 0] step:7281/10000 train_time:306505ms step_avg:42.10ms
+[2025-09-05 18:28:00] [Rank 0] step:7301/10000 train_time:307244ms step_avg:42.08ms
+[2025-09-05 18:28:01] [Rank 0] step:7321/10000 train_time:307981ms step_avg:42.07ms
+[2025-09-05 18:28:02] [Rank 0] step:7341/10000 train_time:308719ms step_avg:42.05ms
+[2025-09-05 18:28:02] [Rank 0] step:7361/10000 train_time:309455ms step_avg:42.04ms
+[2025-09-05 18:28:03] [Rank 0] step:7381/10000 train_time:310192ms step_avg:42.03ms
+[2025-09-05 18:28:04] [Rank 0] step:7401/10000 train_time:310928ms step_avg:42.01ms
+[2025-09-05 18:28:04] [Rank 0] step:7421/10000 train_time:311665ms step_avg:42.00ms
+[2025-09-05 18:28:05] [Rank 0] step:7441/10000 train_time:312402ms step_avg:41.98ms
+[2025-09-05 18:28:06] [Rank 0] step:7461/10000 train_time:313139ms step_avg:41.97ms
+[2025-09-05 18:28:07] [Rank 0] step:7481/10000 train_time:313876ms step_avg:41.96ms
+[2025-09-05 18:28:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
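step_avg in these lines is simply cumulative train_time divided by the step index, which is why it keeps drifting downward as the early, slower steps are amortized. A one-line sketch reproduces the printed values (assumed formula; it is consistent with every step line in this excerpt):

```python
def step_avg(train_time_ms: int, step: int) -> float:
    # Cumulative wall-clock training time averaged over completed steps.
    return train_time_ms / step

# Spot checks against the log above:
print(f"{step_avg(295888, 7000):.2f}")  # 42.27 (step 7000 PRINT line)
print(f"{step_avg(313876, 7481):.2f}")  # 41.96 (step 7481 line)
```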
+[2025-09-05 18:28:08] [Rank 0] PRINT: step:7500/10000 train_loss:1.6762 val_loss:1.6646 train_time:314693ms step_avg:41.96ms
+[2025-09-05 18:28:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:28:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:29:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:29:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:29:29] [Rank 0] Total Loss: 4.1326
+[2025-09-05 18:29:29] [Rank 0] Total FTA (Unweighted): 0.4631
+[2025-09-05 18:29:29] [Rank 0] Total FTA (Weighted): 0.4631
+[2025-09-05 18:29:29] [Rank 0] Group 0 Loss: 3.2599
+[2025-09-05 18:29:29] [Rank 0] Group 1 Loss: 2.9339
+[2025-09-05 18:29:29] [Rank 0] Group 2 Loss: 3.0336
+[2025-09-05 18:29:29] [Rank 0] Group 3 Loss: 3.3670
+[2025-09-05 18:29:29] [Rank 0] Group 4 Loss: 3.5188
+[2025-09-05 18:29:29] [Rank 0] Group 5 Loss: 3.8500
+[2025-09-05 18:29:29] [Rank 0] Group 6 Loss: 3.9776
+[2025-09-05 18:29:29] [Rank 0] Group 7 Loss: 4.2279
+[2025-09-05 18:29:29] [Rank 0] Group 8 Loss: 4.5494
+[2025-09-05 18:29:29] [Rank 0] Group 9 Loss: 4.6892
+[2025-09-05 18:29:29] [Rank 0] Group 10 Loss: 4.7847
+[2025-09-05 18:29:29] [Rank 0] Group 11 Loss: 4.8012
+[2025-09-05 18:29:29] [Rank 0] Group 12 Loss: 4.7548
+[2025-09-05 18:29:29] [Rank 0] Group 13 Loss: 4.7703
+[2025-09-05 18:29:29] [Rank 0] Group 14 Loss: 4.8227
+[2025-09-05 18:29:29] [Rank 0] Group 15 Loss: 4.7815
+[2025-09-05 18:29:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:29:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:29:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:29:29] [Rank 0] Group 3 FTA: 0.6800
+[2025-09-05 18:29:29] [Rank 0] Group 4 FTA: 0.5000
+[2025-09-05 18:29:29] [Rank 0] Group 5 FTA: 0.4600
+[2025-09-05 18:29:29] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 18:29:29] [Rank 0] Group 7 FTA: 0.3400
+[2025-09-05 18:29:29] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 18:29:29] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 18:29:29] [Rank 0] Group 10 FTA: 0.3600
+[2025-09-05 18:29:29] [Rank 0] Group 11 FTA: 0.2900
+[2025-09-05 18:29:29] [Rank 0] Group 12 FTA: 0.2400
+[2025-09-05 18:29:29] [Rank 0] Group 13 FTA: 0.2700
+[2025-09-05 18:29:29] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 18:29:29] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 18:29:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png
+[2025-09-05 18:29:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png
+[2025-09-05 18:29:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png
+[2025-09-05 18:29:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png
+[2025-09-05 18:29:31] [Rank 0] step:7501/10000 train_time:314703ms step_avg:41.95ms
+[2025-09-05 18:29:31] [Rank 0] step:7521/10000 train_time:315374ms step_avg:41.93ms
+[2025-09-05 18:29:32] [Rank 0] step:7541/10000 train_time:316111ms step_avg:41.92ms
+[2025-09-05 18:29:33] [Rank 0] step:7561/10000 train_time:316847ms step_avg:41.91ms
+[2025-09-05 18:29:34] [Rank 0] step:7581/10000 train_time:317584ms step_avg:41.89ms
+[2025-09-05 18:29:34] [Rank 0] step:7601/10000 train_time:318321ms step_avg:41.88ms
+[2025-09-05 18:29:35] [Rank 0] step:7621/10000 train_time:319057ms step_avg:41.87ms
+[2025-09-05 18:29:36] [Rank 0] step:7641/10000 train_time:319816ms step_avg:41.86ms
+[2025-09-05 18:29:37] [Rank 0] step:7661/10000 train_time:321140ms step_avg:41.92ms
+[2025-09-05 18:29:38] [Rank 0] step:7681/10000 train_time:321877ms step_avg:41.91ms
+[2025-09-05 18:29:39] [Rank 0] step:7701/10000 train_time:322614ms step_avg:41.89ms
+[2025-09-05 18:29:39] [Rank 0] step:7721/10000 train_time:323351ms step_avg:41.88ms
+[2025-09-05 18:29:40] [Rank 0] step:7741/10000 train_time:324088ms step_avg:41.87ms
+[2025-09-05 18:29:41] [Rank 0] step:7761/10000 train_time:324825ms step_avg:41.85ms
+[2025-09-05 18:29:42] [Rank 0] step:7781/10000 train_time:325561ms step_avg:41.84ms
+[2025-09-05 18:29:42] [Rank 0] step:7801/10000 train_time:326298ms step_avg:41.83ms
+[2025-09-05 18:29:43] [Rank 0] step:7821/10000 train_time:327035ms step_avg:41.81ms
+[2025-09-05 18:29:44] [Rank 0] step:7841/10000 train_time:327771ms step_avg:41.80ms
+[2025-09-05 18:29:45] [Rank 0] step:7861/10000 train_time:328507ms step_avg:41.79ms
+[2025-09-05 18:29:45] [Rank 0] step:7881/10000 train_time:329244ms step_avg:41.78ms
+[2025-09-05 18:29:46] [Rank 0] step:7901/10000 train_time:329981ms step_avg:41.76ms
+[2025-09-05 18:29:47] [Rank 0] step:7921/10000 train_time:330717ms step_avg:41.75ms
+[2025-09-05 18:29:48] [Rank 0] step:7941/10000 train_time:331455ms step_avg:41.74ms
+[2025-09-05 18:29:48] [Rank 0] step:7961/10000 train_time:332192ms step_avg:41.73ms
+[2025-09-05 18:29:49] [Rank 0] step:7981/10000 train_time:332929ms step_avg:41.72ms
+[2025-09-05 18:29:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:29:50] [Rank 0] PRINT: step:8000/10000 train_loss:1.6680 val_loss:1.6555 train_time:333746ms step_avg:41.72ms +[2025-09-05 18:29:50] [Rank 0] PRINT: step:8000/10000 train_loss:1.6680 val_loss:1.6555 train_time:333746ms step_avg:41.72ms +[2025-09-05 18:29:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:29:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:29:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:29:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:31:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:31:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:31:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:31:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:31:12] [Rank 0] Total Loss: 4.1220 +[2025-09-05 18:31:12] [Rank 0] Total Loss: 4.1220 +[2025-09-05 18:31:12] [Rank 0] Total FTA (Unweighted): 0.4725 +[2025-09-05 18:31:12] [Rank 0] Total FTA (Unweighted): 0.4725 +[2025-09-05 18:31:12] [Rank 0] Total FTA (Weighted): 0.4725 +[2025-09-05 18:31:12] [Rank 0] Total FTA (Weighted): 0.4725 +[2025-09-05 18:31:12] [Rank 0] Group 0 Loss: 3.3091 +[2025-09-05 18:31:12] [Rank 0] Group 0 Loss: 3.3091 +[2025-09-05 18:31:12] [Rank 0] Group 1 Loss: 2.9145 +[2025-09-05 18:31:12] [Rank 0] Group 1 Loss: 2.9145 +[2025-09-05 18:31:12] [Rank 0] Group 2 Loss: 3.0327 +[2025-09-05 18:31:12] [Rank 0] Group 2 Loss: 3.0327 +[2025-09-05 18:31:12] [Rank 0] Group 3 Loss: 3.3365 +[2025-09-05 18:31:12] [Rank 0] Group 3 Loss: 3.3365 +[2025-09-05 18:31:12] [Rank 0] Group 4 Loss: 3.5539 +[2025-09-05 18:31:12] [Rank 0] Group 4 Loss: 3.5539 +[2025-09-05 18:31:12] [Rank 0] Group 5 Loss: 3.8088 +[2025-09-05 18:31:12] [Rank 0] Group 5 Loss: 3.8088 +[2025-09-05 18:31:12] [Rank 0] Group 6 Loss: 3.9893 +[2025-09-05 18:31:12] [Rank 0] Group 6 Loss: 3.9893 +[2025-09-05 18:31:12] [Rank 0] Group 7 Loss: 4.2575 +[2025-09-05 18:31:12] [Rank 0] Group 7 Loss: 4.2575 +[2025-09-05 18:31:12] [Rank 0] Group 8 Loss: 4.5059 +[2025-09-05 18:31:12] [Rank 0] Group 8 Loss: 4.5059 +[2025-09-05 18:31:12] [Rank 0] Group 9 Loss: 4.6390 +[2025-09-05 18:31:12] [Rank 0] Group 9 Loss: 4.6390 +[2025-09-05 18:31:12] [Rank 0] Group 10 Loss: 4.7544 +[2025-09-05 18:31:12] [Rank 0] Group 10 Loss: 4.7544 +[2025-09-05 18:31:12] [Rank 0] Group 11 Loss: 4.7662 +[2025-09-05 18:31:12] [Rank 0] Group 11 Loss: 4.7662 +[2025-09-05 18:31:12] [Rank 0] Group 12 Loss: 4.7552 +[2025-09-05 18:31:12] [Rank 0] Group 12 Loss: 4.7552 +[2025-09-05 18:31:12] [Rank 0] Group 13 Loss: 4.7846 +[2025-09-05 18:31:12] [Rank 0] Group 13 Loss: 4.7846 +[2025-09-05 18:31:12] [Rank 0] Group 14 Loss: 4.7953 +[2025-09-05 18:31:12] [Rank 0] Group 14 Loss: 4.7953 +[2025-09-05 18:31:12] [Rank 0] Group 15 Loss: 4.7496 +[2025-09-05 18:31:12] [Rank 0] Group 15 Loss: 4.7496 +[2025-09-05 18:31:12] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:31:12] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:31:12] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:31:12] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:31:12] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:31:12] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:31:12] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 18:31:12] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 18:31:12] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 18:31:12] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 18:31:12] [Rank 0] Group 5 FTA: 0.4800 +[2025-09-05 18:31:12] [Rank 0] Group 5 FTA: 
0.4800 +[2025-09-05 18:31:12] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 18:31:12] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 18:31:12] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 18:31:12] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 18:31:12] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 18:31:12] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 18:31:12] [Rank 0] Group 9 FTA: 0.2500 +[2025-09-05 18:31:12] [Rank 0] Group 9 FTA: 0.2500 +[2025-09-05 18:31:12] [Rank 0] Group 10 FTA: 0.3200 +[2025-09-05 18:31:12] [Rank 0] Group 10 FTA: 0.3200 +[2025-09-05 18:31:12] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 18:31:12] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 18:31:12] [Rank 0] Group 12 FTA: 0.2700 +[2025-09-05 18:31:12] [Rank 0] Group 12 FTA: 0.2700 +[2025-09-05 18:31:12] [Rank 0] Group 13 FTA: 0.3000 +[2025-09-05 18:31:12] [Rank 0] Group 13 FTA: 0.3000 +[2025-09-05 18:31:12] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 18:31:12] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 18:31:12] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-05 18:31:12] [Rank 0] Group 15 FTA: 0.1200 +[2025-09-05 18:31:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:31:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:31:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:31:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:31:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:31:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:31:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:31:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:31:13] [Rank 0] step:8001/10000 train_time:333755ms step_avg:41.71ms +[2025-09-05 18:31:13] [Rank 0] step:8001/10000 train_time:333755ms step_avg:41.71ms +[2025-09-05 18:31:14] [Rank 0] step:8021/10000 train_time:335023ms step_avg:41.77ms +[2025-09-05 18:31:14] [Rank 0] step:8021/10000 train_time:335023ms step_avg:41.77ms +[2025-09-05 18:31:15] [Rank 0] step:8041/10000 train_time:335759ms step_avg:41.76ms +[2025-09-05 18:31:15] [Rank 0] step:8041/10000 train_time:335759ms step_avg:41.76ms +[2025-09-05 18:31:16] [Rank 0] step:8061/10000 train_time:336496ms step_avg:41.74ms +[2025-09-05 18:31:16] [Rank 0] step:8061/10000 train_time:336496ms step_avg:41.74ms +[2025-09-05 18:31:17] [Rank 0] step:8081/10000 train_time:337232ms step_avg:41.73ms +[2025-09-05 18:31:17] [Rank 0] step:8081/10000 train_time:337232ms step_avg:41.73ms +[2025-09-05 18:31:17] [Rank 0] step:8101/10000 train_time:337969ms step_avg:41.72ms +[2025-09-05 18:31:17] [Rank 0] step:8101/10000 train_time:337969ms step_avg:41.72ms +[2025-09-05 18:31:18] [Rank 0] step:8121/10000 train_time:338706ms step_avg:41.71ms +[2025-09-05 18:31:18] 
[Rank 0] step:8121/10000 train_time:338706ms step_avg:41.71ms +[2025-09-05 18:31:19] [Rank 0] step:8141/10000 train_time:339442ms step_avg:41.70ms +[2025-09-05 18:31:19] [Rank 0] step:8141/10000 train_time:339442ms step_avg:41.70ms +[2025-09-05 18:31:20] [Rank 0] step:8161/10000 train_time:340179ms step_avg:41.68ms +[2025-09-05 18:31:20] [Rank 0] step:8161/10000 train_time:340179ms step_avg:41.68ms +[2025-09-05 18:31:20] [Rank 0] step:8181/10000 train_time:340916ms step_avg:41.67ms +[2025-09-05 18:31:20] [Rank 0] step:8181/10000 train_time:340916ms step_avg:41.67ms +[2025-09-05 18:31:21] [Rank 0] step:8201/10000 train_time:341652ms step_avg:41.66ms +[2025-09-05 18:31:21] [Rank 0] step:8201/10000 train_time:341652ms step_avg:41.66ms +[2025-09-05 18:31:22] [Rank 0] step:8221/10000 train_time:342389ms step_avg:41.65ms +[2025-09-05 18:31:22] [Rank 0] step:8221/10000 train_time:342389ms step_avg:41.65ms +[2025-09-05 18:31:23] [Rank 0] step:8241/10000 train_time:343126ms step_avg:41.64ms +[2025-09-05 18:31:23] [Rank 0] step:8241/10000 train_time:343126ms step_avg:41.64ms +[2025-09-05 18:31:23] [Rank 0] step:8261/10000 train_time:343863ms step_avg:41.62ms +[2025-09-05 18:31:23] [Rank 0] step:8261/10000 train_time:343863ms step_avg:41.62ms +[2025-09-05 18:31:24] [Rank 0] step:8281/10000 train_time:344599ms step_avg:41.61ms +[2025-09-05 18:31:24] [Rank 0] step:8281/10000 train_time:344599ms step_avg:41.61ms +[2025-09-05 18:31:25] [Rank 0] step:8301/10000 train_time:345336ms step_avg:41.60ms +[2025-09-05 18:31:25] [Rank 0] step:8301/10000 train_time:345336ms step_avg:41.60ms +[2025-09-05 18:31:25] [Rank 0] step:8321/10000 train_time:346073ms step_avg:41.59ms +[2025-09-05 18:31:25] [Rank 0] step:8321/10000 train_time:346073ms step_avg:41.59ms +[2025-09-05 18:31:26] [Rank 0] step:8341/10000 train_time:346810ms step_avg:41.58ms +[2025-09-05 18:31:26] [Rank 0] step:8341/10000 train_time:346810ms step_avg:41.58ms +[2025-09-05 18:31:27] [Rank 0] step:8361/10000 train_time:347547ms step_avg:41.57ms +[2025-09-05 18:31:27] [Rank 0] step:8361/10000 train_time:347547ms step_avg:41.57ms +[2025-09-05 18:31:28] [Rank 0] step:8381/10000 train_time:348283ms step_avg:41.56ms +[2025-09-05 18:31:28] [Rank 0] step:8381/10000 train_time:348283ms step_avg:41.56ms +[2025-09-05 18:31:28] [Rank 0] step:8401/10000 train_time:349020ms step_avg:41.55ms +[2025-09-05 18:31:28] [Rank 0] step:8401/10000 train_time:349020ms step_avg:41.55ms +[2025-09-05 18:31:29] [Rank 0] step:8421/10000 train_time:349757ms step_avg:41.53ms +[2025-09-05 18:31:29] [Rank 0] step:8421/10000 train_time:349757ms step_avg:41.53ms +[2025-09-05 18:31:30] [Rank 0] step:8441/10000 train_time:350493ms step_avg:41.52ms +[2025-09-05 18:31:30] [Rank 0] step:8441/10000 train_time:350493ms step_avg:41.52ms +[2025-09-05 18:31:31] [Rank 0] step:8461/10000 train_time:351230ms step_avg:41.51ms +[2025-09-05 18:31:31] [Rank 0] step:8461/10000 train_time:351230ms step_avg:41.51ms +[2025-09-05 18:31:31] [Rank 0] step:8481/10000 train_time:351967ms step_avg:41.50ms +[2025-09-05 18:31:31] [Rank 0] step:8481/10000 train_time:351967ms step_avg:41.50ms +[2025-09-05 18:31:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:31:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
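The divisibility warning above is pure integer arithmetic: 491520 / 65536 = 7.5, so a floor-division batching loop can run only 7 full validation batches (458752 tokens) and the trailing 32768 tokens are skipped. A minimal sketch of the check, with illustrative variable names rather than the script's own:

val_tokens = 491520
val_batch_size = 65536
num_full_batches = val_tokens // val_batch_size   # 7
covered = num_full_batches * val_batch_size       # 458752
missed = val_tokens - covered                     # 32768 tokens at the tail
if val_tokens % val_batch_size != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")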
+[2025-09-05 18:31:33] [Rank 0] PRINT: step:8500/10000 train_loss:1.6585 val_loss:1.6466 train_time:352785ms step_avg:41.50ms +[2025-09-05 18:31:33] [Rank 0] PRINT: step:8500/10000 train_loss:1.6585 val_loss:1.6466 train_time:352785ms step_avg:41.50ms +[2025-09-05 18:31:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:31:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:31:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:31:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:32:54] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:32:54] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:32:54] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:32:54] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:32:54] [Rank 0] Total Loss: 4.1349 +[2025-09-05 18:32:54] [Rank 0] Total Loss: 4.1349 +[2025-09-05 18:32:54] [Rank 0] Total FTA (Unweighted): 0.4662 +[2025-09-05 18:32:54] [Rank 0] Total FTA (Unweighted): 0.4662 +[2025-09-05 18:32:54] [Rank 0] Total FTA (Weighted): 0.4662 +[2025-09-05 18:32:54] [Rank 0] Total FTA (Weighted): 0.4662 +[2025-09-05 18:32:54] [Rank 0] Group 0 Loss: 3.2120 +[2025-09-05 18:32:54] [Rank 0] Group 0 Loss: 3.2120 +[2025-09-05 18:32:54] [Rank 0] Group 1 Loss: 2.9345 +[2025-09-05 18:32:54] [Rank 0] Group 1 Loss: 2.9345 +[2025-09-05 18:32:54] [Rank 0] Group 2 Loss: 3.1271 +[2025-09-05 18:32:54] [Rank 0] Group 2 Loss: 3.1271 +[2025-09-05 18:32:54] [Rank 0] Group 3 Loss: 3.3847 +[2025-09-05 18:32:54] [Rank 0] Group 3 Loss: 3.3847 +[2025-09-05 18:32:54] [Rank 0] Group 4 Loss: 3.5874 +[2025-09-05 18:32:54] [Rank 0] Group 4 Loss: 3.5874 +[2025-09-05 18:32:54] [Rank 0] Group 5 Loss: 3.8089 +[2025-09-05 18:32:54] [Rank 0] Group 5 Loss: 3.8089 +[2025-09-05 18:32:54] [Rank 0] Group 6 Loss: 3.9830 +[2025-09-05 18:32:54] [Rank 0] Group 6 Loss: 3.9830 +[2025-09-05 18:32:54] [Rank 0] Group 7 Loss: 4.2443 +[2025-09-05 18:32:54] [Rank 0] Group 7 Loss: 4.2443 +[2025-09-05 18:32:54] [Rank 0] Group 8 Loss: 4.5144 +[2025-09-05 18:32:54] [Rank 0] Group 8 Loss: 4.5144 +[2025-09-05 18:32:54] [Rank 0] Group 9 Loss: 4.6903 +[2025-09-05 18:32:54] [Rank 0] Group 9 Loss: 4.6903 +[2025-09-05 18:32:54] [Rank 0] Group 10 Loss: 4.7731 +[2025-09-05 18:32:54] [Rank 0] Group 10 Loss: 4.7731 +[2025-09-05 18:32:54] [Rank 0] Group 11 Loss: 4.7951 +[2025-09-05 18:32:54] [Rank 0] Group 11 Loss: 4.7951 +[2025-09-05 18:32:54] [Rank 0] Group 12 Loss: 4.7397 +[2025-09-05 18:32:54] [Rank 0] Group 12 Loss: 4.7397 +[2025-09-05 18:32:54] [Rank 0] Group 13 Loss: 4.7922 +[2025-09-05 18:32:54] [Rank 0] Group 13 Loss: 4.7922 +[2025-09-05 18:32:54] [Rank 0] Group 14 Loss: 4.8055 +[2025-09-05 18:32:54] [Rank 0] Group 14 Loss: 4.8055 +[2025-09-05 18:32:54] [Rank 0] Group 15 Loss: 4.7669 +[2025-09-05 18:32:54] [Rank 0] Group 15 Loss: 4.7669 +[2025-09-05 18:32:54] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:32:54] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:32:54] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:32:54] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:32:54] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:32:54] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:32:54] [Rank 0] Group 3 FTA: 0.6800 +[2025-09-05 18:32:54] [Rank 0] Group 3 FTA: 0.6800 +[2025-09-05 18:32:54] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 18:32:54] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 18:32:54] [Rank 0] Group 5 FTA: 0.5000 +[2025-09-05 18:32:54] [Rank 0] Group 5 FTA: 
0.5000 +[2025-09-05 18:32:54] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 18:32:54] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 18:32:54] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 18:32:54] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 18:32:54] [Rank 0] Group 8 FTA: 0.3500 +[2025-09-05 18:32:54] [Rank 0] Group 8 FTA: 0.3500 +[2025-09-05 18:32:54] [Rank 0] Group 9 FTA: 0.2600 +[2025-09-05 18:32:54] [Rank 0] Group 9 FTA: 0.2600 +[2025-09-05 18:32:54] [Rank 0] Group 10 FTA: 0.3700 +[2025-09-05 18:32:54] [Rank 0] Group 10 FTA: 0.3700 +[2025-09-05 18:32:54] [Rank 0] Group 11 FTA: 0.2900 +[2025-09-05 18:32:54] [Rank 0] Group 11 FTA: 0.2900 +[2025-09-05 18:32:54] [Rank 0] Group 12 FTA: 0.2700 +[2025-09-05 18:32:54] [Rank 0] Group 12 FTA: 0.2700 +[2025-09-05 18:32:54] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 18:32:54] [Rank 0] Group 13 FTA: 0.2500 +[2025-09-05 18:32:54] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 18:32:54] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 18:32:54] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 18:32:54] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 18:32:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:32:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:32:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:32:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:32:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:32:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:32:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:32:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:32:56] [Rank 0] step:8501/10000 train_time:352794ms step_avg:41.50ms +[2025-09-05 18:32:56] [Rank 0] step:8501/10000 train_time:352794ms step_avg:41.50ms +[2025-09-05 18:32:56] [Rank 0] step:8521/10000 train_time:353468ms step_avg:41.48ms +[2025-09-05 18:32:56] [Rank 0] step:8521/10000 train_time:353468ms step_avg:41.48ms +[2025-09-05 18:32:57] [Rank 0] step:8541/10000 train_time:354205ms step_avg:41.47ms +[2025-09-05 18:32:57] [Rank 0] step:8541/10000 train_time:354205ms step_avg:41.47ms +[2025-09-05 18:32:58] [Rank 0] step:8561/10000 train_time:354941ms step_avg:41.46ms +[2025-09-05 18:32:58] [Rank 0] step:8561/10000 train_time:354941ms step_avg:41.46ms +[2025-09-05 18:32:59] [Rank 0] step:8581/10000 train_time:355678ms step_avg:41.45ms +[2025-09-05 18:32:59] [Rank 0] step:8581/10000 train_time:355678ms step_avg:41.45ms +[2025-09-05 18:32:59] [Rank 0] step:8601/10000 train_time:356415ms step_avg:41.44ms +[2025-09-05 18:32:59] [Rank 0] step:8601/10000 train_time:356415ms step_avg:41.44ms +[2025-09-05 18:33:00] [Rank 0] step:8621/10000 train_time:357151ms step_avg:41.43ms +[2025-09-05 18:33:00] 
[Rank 0] step:8621/10000 train_time:357151ms step_avg:41.43ms +[2025-09-05 18:33:01] [Rank 0] step:8641/10000 train_time:357888ms step_avg:41.42ms +[2025-09-05 18:33:01] [Rank 0] step:8641/10000 train_time:357888ms step_avg:41.42ms +[2025-09-05 18:33:02] [Rank 0] step:8661/10000 train_time:358625ms step_avg:41.41ms +[2025-09-05 18:33:02] [Rank 0] step:8661/10000 train_time:358625ms step_avg:41.41ms +[2025-09-05 18:33:02] [Rank 0] step:8681/10000 train_time:359361ms step_avg:41.40ms +[2025-09-05 18:33:02] [Rank 0] step:8681/10000 train_time:359361ms step_avg:41.40ms +[2025-09-05 18:33:03] [Rank 0] step:8701/10000 train_time:360098ms step_avg:41.39ms +[2025-09-05 18:33:03] [Rank 0] step:8701/10000 train_time:360098ms step_avg:41.39ms +[2025-09-05 18:33:04] [Rank 0] step:8721/10000 train_time:360836ms step_avg:41.38ms +[2025-09-05 18:33:04] [Rank 0] step:8721/10000 train_time:360836ms step_avg:41.38ms +[2025-09-05 18:33:05] [Rank 0] step:8741/10000 train_time:361572ms step_avg:41.37ms +[2025-09-05 18:33:05] [Rank 0] step:8741/10000 train_time:361572ms step_avg:41.37ms +[2025-09-05 18:33:05] [Rank 0] step:8761/10000 train_time:362308ms step_avg:41.35ms +[2025-09-05 18:33:05] [Rank 0] step:8761/10000 train_time:362308ms step_avg:41.35ms +[2025-09-05 18:33:06] [Rank 0] step:8781/10000 train_time:363045ms step_avg:41.34ms +[2025-09-05 18:33:06] [Rank 0] step:8781/10000 train_time:363045ms step_avg:41.34ms +[2025-09-05 18:33:07] [Rank 0] step:8801/10000 train_time:363782ms step_avg:41.33ms +[2025-09-05 18:33:07] [Rank 0] step:8801/10000 train_time:363782ms step_avg:41.33ms +[2025-09-05 18:33:07] [Rank 0] step:8821/10000 train_time:364520ms step_avg:41.32ms +[2025-09-05 18:33:07] [Rank 0] step:8821/10000 train_time:364520ms step_avg:41.32ms +[2025-09-05 18:33:09] [Rank 0] step:8841/10000 train_time:365864ms step_avg:41.38ms +[2025-09-05 18:33:09] [Rank 0] step:8841/10000 train_time:365864ms step_avg:41.38ms +[2025-09-05 18:33:10] [Rank 0] step:8861/10000 train_time:366601ms step_avg:41.37ms +[2025-09-05 18:33:10] [Rank 0] step:8861/10000 train_time:366601ms step_avg:41.37ms +[2025-09-05 18:33:10] [Rank 0] step:8881/10000 train_time:367484ms step_avg:41.38ms +[2025-09-05 18:33:10] [Rank 0] step:8881/10000 train_time:367484ms step_avg:41.38ms +[2025-09-05 18:33:11] [Rank 0] step:8901/10000 train_time:368221ms step_avg:41.37ms +[2025-09-05 18:33:11] [Rank 0] step:8901/10000 train_time:368221ms step_avg:41.37ms +[2025-09-05 18:33:12] [Rank 0] step:8921/10000 train_time:368958ms step_avg:41.36ms +[2025-09-05 18:33:12] [Rank 0] step:8921/10000 train_time:368958ms step_avg:41.36ms +[2025-09-05 18:33:13] [Rank 0] step:8941/10000 train_time:369901ms step_avg:41.37ms +[2025-09-05 18:33:13] [Rank 0] step:8941/10000 train_time:369901ms step_avg:41.37ms +[2025-09-05 18:33:14] [Rank 0] step:8961/10000 train_time:370638ms step_avg:41.36ms +[2025-09-05 18:33:14] [Rank 0] step:8961/10000 train_time:370638ms step_avg:41.36ms +[2025-09-05 18:33:14] [Rank 0] step:8981/10000 train_time:371375ms step_avg:41.35ms +[2025-09-05 18:33:14] [Rank 0] step:8981/10000 train_time:371375ms step_avg:41.35ms +[2025-09-05 18:33:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:33:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
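The step_avg column is consistent with cumulative train_time divided by the number of completed steps, which is why it creeps downward as fixed startup costs are amortized over more steps. A quick check against the values logged above (a sketch; the script's own bookkeeping is not shown here):

train_time_ms = 371375          # logged at step 8981 above
steps_done = 8981
step_avg_ms = train_time_ms / steps_done
print(f"{step_avg_ms:.2f} ms")  # 41.35 ms, matching the log line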
+[2025-09-05 18:33:16] [Rank 0] PRINT: step:9000/10000 train_loss:1.6496 val_loss:1.6374 train_time:372192ms step_avg:41.35ms +[2025-09-05 18:33:16] [Rank 0] PRINT: step:9000/10000 train_loss:1.6496 val_loss:1.6374 train_time:372192ms step_avg:41.35ms +[2025-09-05 18:33:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:33:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:33:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:33:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:34:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:34:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:34:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:34:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:34:38] [Rank 0] Total Loss: 4.1778 +[2025-09-05 18:34:38] [Rank 0] Total Loss: 4.1778 +[2025-09-05 18:34:38] [Rank 0] Total FTA (Unweighted): 0.4781 +[2025-09-05 18:34:38] [Rank 0] Total FTA (Unweighted): 0.4781 +[2025-09-05 18:34:38] [Rank 0] Total FTA (Weighted): 0.4781 +[2025-09-05 18:34:38] [Rank 0] Total FTA (Weighted): 0.4781 +[2025-09-05 18:34:38] [Rank 0] Group 0 Loss: 3.3134 +[2025-09-05 18:34:38] [Rank 0] Group 0 Loss: 3.3134 +[2025-09-05 18:34:38] [Rank 0] Group 1 Loss: 2.9511 +[2025-09-05 18:34:38] [Rank 0] Group 1 Loss: 2.9511 +[2025-09-05 18:34:38] [Rank 0] Group 2 Loss: 3.1315 +[2025-09-05 18:34:38] [Rank 0] Group 2 Loss: 3.1315 +[2025-09-05 18:34:38] [Rank 0] Group 3 Loss: 3.4245 +[2025-09-05 18:34:38] [Rank 0] Group 3 Loss: 3.4245 +[2025-09-05 18:34:38] [Rank 0] Group 4 Loss: 3.6188 +[2025-09-05 18:34:38] [Rank 0] Group 4 Loss: 3.6188 +[2025-09-05 18:34:38] [Rank 0] Group 5 Loss: 3.8668 +[2025-09-05 18:34:38] [Rank 0] Group 5 Loss: 3.8668 +[2025-09-05 18:34:38] [Rank 0] Group 6 Loss: 4.0326 +[2025-09-05 18:34:38] [Rank 0] Group 6 Loss: 4.0326 +[2025-09-05 18:34:38] [Rank 0] Group 7 Loss: 4.2729 +[2025-09-05 18:34:38] [Rank 0] Group 7 Loss: 4.2729 +[2025-09-05 18:34:38] [Rank 0] Group 8 Loss: 4.5678 +[2025-09-05 18:34:38] [Rank 0] Group 8 Loss: 4.5678 +[2025-09-05 18:34:38] [Rank 0] Group 9 Loss: 4.7246 +[2025-09-05 18:34:38] [Rank 0] Group 9 Loss: 4.7246 +[2025-09-05 18:34:38] [Rank 0] Group 10 Loss: 4.8171 +[2025-09-05 18:34:38] [Rank 0] Group 10 Loss: 4.8171 +[2025-09-05 18:34:38] [Rank 0] Group 11 Loss: 4.8180 +[2025-09-05 18:34:38] [Rank 0] Group 11 Loss: 4.8180 +[2025-09-05 18:34:38] [Rank 0] Group 12 Loss: 4.7868 +[2025-09-05 18:34:38] [Rank 0] Group 12 Loss: 4.7868 +[2025-09-05 18:34:38] [Rank 0] Group 13 Loss: 4.8437 +[2025-09-05 18:34:38] [Rank 0] Group 13 Loss: 4.8437 +[2025-09-05 18:34:38] [Rank 0] Group 14 Loss: 4.8680 +[2025-09-05 18:34:38] [Rank 0] Group 14 Loss: 4.8680 +[2025-09-05 18:34:38] [Rank 0] Group 15 Loss: 4.8080 +[2025-09-05 18:34:38] [Rank 0] Group 15 Loss: 4.8080 +[2025-09-05 18:34:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:34:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:34:38] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:34:38] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:34:38] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:34:38] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:34:38] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 18:34:38] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 18:34:38] [Rank 0] Group 4 FTA: 0.4900 +[2025-09-05 18:34:38] [Rank 0] Group 4 FTA: 0.4900 +[2025-09-05 18:34:38] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 18:34:38] [Rank 0] Group 5 FTA: 
0.5100 +[2025-09-05 18:34:38] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 18:34:38] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 18:34:38] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 18:34:38] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 18:34:38] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 18:34:38] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 18:34:38] [Rank 0] Group 9 FTA: 0.2800 +[2025-09-05 18:34:38] [Rank 0] Group 9 FTA: 0.2800 +[2025-09-05 18:34:38] [Rank 0] Group 10 FTA: 0.3600 +[2025-09-05 18:34:38] [Rank 0] Group 10 FTA: 0.3600 +[2025-09-05 18:34:38] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 18:34:38] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 18:34:38] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 18:34:38] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 18:34:38] [Rank 0] Group 13 FTA: 0.2700 +[2025-09-05 18:34:38] [Rank 0] Group 13 FTA: 0.2700 +[2025-09-05 18:34:38] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 18:34:38] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 18:34:38] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 18:34:38] [Rank 0] Group 15 FTA: 0.1100 +[2025-09-05 18:34:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:34:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:34:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:34:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:34:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:34:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:34:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:34:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:34:39] [Rank 0] step:9001/10000 train_time:372201ms step_avg:41.35ms +[2025-09-05 18:34:39] [Rank 0] step:9001/10000 train_time:372201ms step_avg:41.35ms +[2025-09-05 18:34:40] [Rank 0] step:9021/10000 train_time:372873ms step_avg:41.33ms +[2025-09-05 18:34:40] [Rank 0] step:9021/10000 train_time:372873ms step_avg:41.33ms +[2025-09-05 18:34:41] [Rank 0] step:9041/10000 train_time:373610ms step_avg:41.32ms +[2025-09-05 18:34:41] [Rank 0] step:9041/10000 train_time:373610ms step_avg:41.32ms +[2025-09-05 18:34:41] [Rank 0] step:9061/10000 train_time:374347ms step_avg:41.31ms +[2025-09-05 18:34:41] [Rank 0] step:9061/10000 train_time:374347ms step_avg:41.31ms +[2025-09-05 18:34:42] [Rank 0] step:9081/10000 train_time:375084ms step_avg:41.30ms +[2025-09-05 18:34:42] [Rank 0] step:9081/10000 train_time:375084ms step_avg:41.30ms +[2025-09-05 18:34:43] [Rank 0] step:9101/10000 train_time:375821ms step_avg:41.29ms +[2025-09-05 18:34:43] [Rank 0] step:9101/10000 train_time:375821ms step_avg:41.29ms +[2025-09-05 18:34:44] [Rank 0] step:9121/10000 train_time:376558ms step_avg:41.28ms +[2025-09-05 18:34:44] 
[Rank 0] step:9121/10000 train_time:376558ms step_avg:41.28ms +[2025-09-05 18:34:44] [Rank 0] step:9141/10000 train_time:377294ms step_avg:41.27ms +[2025-09-05 18:34:44] [Rank 0] step:9141/10000 train_time:377294ms step_avg:41.27ms +[2025-09-05 18:34:45] [Rank 0] step:9161/10000 train_time:378031ms step_avg:41.27ms +[2025-09-05 18:34:45] [Rank 0] step:9161/10000 train_time:378031ms step_avg:41.27ms +[2025-09-05 18:34:46] [Rank 0] step:9181/10000 train_time:378768ms step_avg:41.26ms +[2025-09-05 18:34:46] [Rank 0] step:9181/10000 train_time:378768ms step_avg:41.26ms +[2025-09-05 18:34:47] [Rank 0] step:9201/10000 train_time:379504ms step_avg:41.25ms +[2025-09-05 18:34:47] [Rank 0] step:9201/10000 train_time:379504ms step_avg:41.25ms +[2025-09-05 18:34:47] [Rank 0] step:9221/10000 train_time:380241ms step_avg:41.24ms +[2025-09-05 18:34:47] [Rank 0] step:9221/10000 train_time:380241ms step_avg:41.24ms +[2025-09-05 18:34:48] [Rank 0] step:9241/10000 train_time:380978ms step_avg:41.23ms +[2025-09-05 18:34:48] [Rank 0] step:9241/10000 train_time:380978ms step_avg:41.23ms +[2025-09-05 18:34:49] [Rank 0] step:9261/10000 train_time:381713ms step_avg:41.22ms +[2025-09-05 18:34:49] [Rank 0] step:9261/10000 train_time:381713ms step_avg:41.22ms +[2025-09-05 18:34:50] [Rank 0] step:9281/10000 train_time:382449ms step_avg:41.21ms +[2025-09-05 18:34:50] [Rank 0] step:9281/10000 train_time:382449ms step_avg:41.21ms +[2025-09-05 18:34:50] [Rank 0] step:9301/10000 train_time:383186ms step_avg:41.20ms +[2025-09-05 18:34:50] [Rank 0] step:9301/10000 train_time:383186ms step_avg:41.20ms +[2025-09-05 18:34:51] [Rank 0] step:9321/10000 train_time:383922ms step_avg:41.19ms +[2025-09-05 18:34:51] [Rank 0] step:9321/10000 train_time:383922ms step_avg:41.19ms +[2025-09-05 18:34:52] [Rank 0] step:9341/10000 train_time:384658ms step_avg:41.18ms +[2025-09-05 18:34:52] [Rank 0] step:9341/10000 train_time:384658ms step_avg:41.18ms +[2025-09-05 18:34:53] [Rank 0] step:9361/10000 train_time:385395ms step_avg:41.17ms +[2025-09-05 18:34:53] [Rank 0] step:9361/10000 train_time:385395ms step_avg:41.17ms +[2025-09-05 18:34:53] [Rank 0] step:9381/10000 train_time:386132ms step_avg:41.16ms +[2025-09-05 18:34:53] [Rank 0] step:9381/10000 train_time:386132ms step_avg:41.16ms +[2025-09-05 18:34:54] [Rank 0] step:9401/10000 train_time:386868ms step_avg:41.15ms +[2025-09-05 18:34:54] [Rank 0] step:9401/10000 train_time:386868ms step_avg:41.15ms +[2025-09-05 18:34:55] [Rank 0] step:9421/10000 train_time:387605ms step_avg:41.14ms +[2025-09-05 18:34:55] [Rank 0] step:9421/10000 train_time:387605ms step_avg:41.14ms +[2025-09-05 18:34:55] [Rank 0] step:9441/10000 train_time:388342ms step_avg:41.13ms +[2025-09-05 18:34:55] [Rank 0] step:9441/10000 train_time:388342ms step_avg:41.13ms +[2025-09-05 18:34:56] [Rank 0] step:9461/10000 train_time:389078ms step_avg:41.12ms +[2025-09-05 18:34:56] [Rank 0] step:9461/10000 train_time:389078ms step_avg:41.12ms +[2025-09-05 18:34:57] [Rank 0] step:9481/10000 train_time:389815ms step_avg:41.12ms +[2025-09-05 18:34:57] [Rank 0] step:9481/10000 train_time:389815ms step_avg:41.12ms +[2025-09-05 18:34:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:34:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
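Each detailed evaluation prints both an unweighted and a weighted total FTA, and the two are identical throughout this run. That is expected rather than a bug: the fixed-eval set holds 1600 samples split evenly over 16 groups (per_group_k = 100), so the plain mean of per-group FTAs equals the sample-weighted mean. A sketch of both reductions using the step-9000 group FTAs logged above, assuming equal group sizes:

# Step-9000 per-group FTAs from the log; 100 samples per group assumed.
group_fta = [1.00, 1.00, 1.00, 0.73, 0.49, 0.51, 0.40, 0.34,
             0.36, 0.28, 0.36, 0.32, 0.30, 0.27, 0.18, 0.11]
group_n = [100] * 16
unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_n)) / sum(group_n)
assert abs(unweighted - weighted) < 1e-9  # equal group sizes -> identical
print(f"{unweighted:.4f}")                # 0.4781, as logged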
+[2025-09-05 18:34:58] [Rank 0] PRINT: step:9500/10000 train_loss:1.6414 val_loss:1.6307 train_time:390633ms step_avg:41.12ms +[2025-09-05 18:34:58] [Rank 0] PRINT: step:9500/10000 train_loss:1.6414 val_loss:1.6307 train_time:390633ms step_avg:41.12ms +[2025-09-05 18:34:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:34:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:34:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:34:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:36:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:36:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:36:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:36:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:36:20] [Rank 0] Total Loss: 4.1733 +[2025-09-05 18:36:20] [Rank 0] Total Loss: 4.1733 +[2025-09-05 18:36:20] [Rank 0] Total FTA (Unweighted): 0.4781 +[2025-09-05 18:36:20] [Rank 0] Total FTA (Unweighted): 0.4781 +[2025-09-05 18:36:20] [Rank 0] Total FTA (Weighted): 0.4781 +[2025-09-05 18:36:20] [Rank 0] Total FTA (Weighted): 0.4781 +[2025-09-05 18:36:20] [Rank 0] Group 0 Loss: 3.2969 +[2025-09-05 18:36:20] [Rank 0] Group 0 Loss: 3.2969 +[2025-09-05 18:36:20] [Rank 0] Group 1 Loss: 2.9496 +[2025-09-05 18:36:20] [Rank 0] Group 1 Loss: 2.9496 +[2025-09-05 18:36:20] [Rank 0] Group 2 Loss: 3.1110 +[2025-09-05 18:36:20] [Rank 0] Group 2 Loss: 3.1110 +[2025-09-05 18:36:20] [Rank 0] Group 3 Loss: 3.4491 +[2025-09-05 18:36:20] [Rank 0] Group 3 Loss: 3.4491 +[2025-09-05 18:36:20] [Rank 0] Group 4 Loss: 3.6312 +[2025-09-05 18:36:20] [Rank 0] Group 4 Loss: 3.6312 +[2025-09-05 18:36:20] [Rank 0] Group 5 Loss: 3.8672 +[2025-09-05 18:36:20] [Rank 0] Group 5 Loss: 3.8672 +[2025-09-05 18:36:20] [Rank 0] Group 6 Loss: 4.0163 +[2025-09-05 18:36:20] [Rank 0] Group 6 Loss: 4.0163 +[2025-09-05 18:36:20] [Rank 0] Group 7 Loss: 4.2681 +[2025-09-05 18:36:20] [Rank 0] Group 7 Loss: 4.2681 +[2025-09-05 18:36:20] [Rank 0] Group 8 Loss: 4.5746 +[2025-09-05 18:36:20] [Rank 0] Group 8 Loss: 4.5746 +[2025-09-05 18:36:20] [Rank 0] Group 9 Loss: 4.7083 +[2025-09-05 18:36:20] [Rank 0] Group 9 Loss: 4.7083 +[2025-09-05 18:36:20] [Rank 0] Group 10 Loss: 4.8041 +[2025-09-05 18:36:20] [Rank 0] Group 10 Loss: 4.8041 +[2025-09-05 18:36:20] [Rank 0] Group 11 Loss: 4.8334 +[2025-09-05 18:36:20] [Rank 0] Group 11 Loss: 4.8334 +[2025-09-05 18:36:20] [Rank 0] Group 12 Loss: 4.7667 +[2025-09-05 18:36:20] [Rank 0] Group 12 Loss: 4.7667 +[2025-09-05 18:36:20] [Rank 0] Group 13 Loss: 4.8438 +[2025-09-05 18:36:20] [Rank 0] Group 13 Loss: 4.8438 +[2025-09-05 18:36:20] [Rank 0] Group 14 Loss: 4.8589 +[2025-09-05 18:36:20] [Rank 0] Group 14 Loss: 4.8589 +[2025-09-05 18:36:20] [Rank 0] Group 15 Loss: 4.7930 +[2025-09-05 18:36:20] [Rank 0] Group 15 Loss: 4.7930 +[2025-09-05 18:36:20] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:36:20] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:36:20] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:36:20] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:36:20] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:36:20] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:36:20] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 18:36:20] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 18:36:20] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 18:36:20] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 18:36:20] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 18:36:20] [Rank 0] Group 5 FTA: 
0.5100 +[2025-09-05 18:36:20] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 18:36:20] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 18:36:20] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 18:36:20] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 18:36:20] [Rank 0] Group 8 FTA: 0.3300 +[2025-09-05 18:36:20] [Rank 0] Group 8 FTA: 0.3300 +[2025-09-05 18:36:20] [Rank 0] Group 9 FTA: 0.2900 +[2025-09-05 18:36:20] [Rank 0] Group 9 FTA: 0.2900 +[2025-09-05 18:36:20] [Rank 0] Group 10 FTA: 0.3700 +[2025-09-05 18:36:20] [Rank 0] Group 10 FTA: 0.3700 +[2025-09-05 18:36:20] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 18:36:20] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 18:36:20] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 18:36:20] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 18:36:20] [Rank 0] Group 13 FTA: 0.3100 +[2025-09-05 18:36:20] [Rank 0] Group 13 FTA: 0.3100 +[2025-09-05 18:36:20] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 18:36:20] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 18:36:20] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 18:36:20] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 18:36:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:36:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:36:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:36:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:36:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:36:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:36:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:36:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:36:21] [Rank 0] step:9501/10000 train_time:390642ms step_avg:41.12ms +[2025-09-05 18:36:21] [Rank 0] step:9501/10000 train_time:390642ms step_avg:41.12ms +[2025-09-05 18:36:22] [Rank 0] step:9521/10000 train_time:391308ms step_avg:41.10ms +[2025-09-05 18:36:22] [Rank 0] step:9521/10000 train_time:391308ms step_avg:41.10ms +[2025-09-05 18:36:23] [Rank 0] step:9541/10000 train_time:392181ms step_avg:41.10ms +[2025-09-05 18:36:23] [Rank 0] step:9541/10000 train_time:392181ms step_avg:41.10ms +[2025-09-05 18:36:24] [Rank 0] step:9561/10000 train_time:392918ms step_avg:41.10ms +[2025-09-05 18:36:24] [Rank 0] step:9561/10000 train_time:392918ms step_avg:41.10ms +[2025-09-05 18:36:24] [Rank 0] step:9581/10000 train_time:393655ms step_avg:41.09ms +[2025-09-05 18:36:24] [Rank 0] step:9581/10000 train_time:393655ms step_avg:41.09ms +[2025-09-05 18:36:25] [Rank 0] step:9601/10000 train_time:394391ms step_avg:41.08ms +[2025-09-05 18:36:25] [Rank 0] step:9601/10000 train_time:394391ms step_avg:41.08ms +[2025-09-05 18:36:26] [Rank 0] step:9621/10000 train_time:395128ms step_avg:41.07ms +[2025-09-05 18:36:26] 
[Rank 0] step:9621/10000 train_time:395128ms step_avg:41.07ms +[2025-09-05 18:36:26] [Rank 0] step:9641/10000 train_time:395865ms step_avg:41.06ms +[2025-09-05 18:36:26] [Rank 0] step:9641/10000 train_time:395865ms step_avg:41.06ms +[2025-09-05 18:36:27] [Rank 0] step:9661/10000 train_time:396880ms step_avg:41.08ms +[2025-09-05 18:36:27] [Rank 0] step:9661/10000 train_time:396880ms step_avg:41.08ms +[2025-09-05 18:36:28] [Rank 0] step:9681/10000 train_time:397617ms step_avg:41.07ms +[2025-09-05 18:36:28] [Rank 0] step:9681/10000 train_time:397617ms step_avg:41.07ms +[2025-09-05 18:36:29] [Rank 0] step:9701/10000 train_time:398354ms step_avg:41.06ms +[2025-09-05 18:36:29] [Rank 0] step:9701/10000 train_time:398354ms step_avg:41.06ms +[2025-09-05 18:36:30] [Rank 0] step:9721/10000 train_time:399091ms step_avg:41.05ms +[2025-09-05 18:36:30] [Rank 0] step:9721/10000 train_time:399091ms step_avg:41.05ms +[2025-09-05 18:36:30] [Rank 0] step:9741/10000 train_time:399828ms step_avg:41.05ms +[2025-09-05 18:36:30] [Rank 0] step:9741/10000 train_time:399828ms step_avg:41.05ms +[2025-09-05 18:36:31] [Rank 0] step:9761/10000 train_time:400564ms step_avg:41.04ms +[2025-09-05 18:36:31] [Rank 0] step:9761/10000 train_time:400564ms step_avg:41.04ms +[2025-09-05 18:36:32] [Rank 0] step:9781/10000 train_time:401300ms step_avg:41.03ms +[2025-09-05 18:36:32] [Rank 0] step:9781/10000 train_time:401300ms step_avg:41.03ms +[2025-09-05 18:36:33] [Rank 0] step:9801/10000 train_time:402037ms step_avg:41.02ms +[2025-09-05 18:36:33] [Rank 0] step:9801/10000 train_time:402037ms step_avg:41.02ms +[2025-09-05 18:36:33] [Rank 0] step:9821/10000 train_time:402773ms step_avg:41.01ms +[2025-09-05 18:36:33] [Rank 0] step:9821/10000 train_time:402773ms step_avg:41.01ms +[2025-09-05 18:36:34] [Rank 0] step:9841/10000 train_time:403510ms step_avg:41.00ms +[2025-09-05 18:36:34] [Rank 0] step:9841/10000 train_time:403510ms step_avg:41.00ms +[2025-09-05 18:36:35] [Rank 0] step:9861/10000 train_time:404248ms step_avg:40.99ms +[2025-09-05 18:36:35] [Rank 0] step:9861/10000 train_time:404248ms step_avg:40.99ms +[2025-09-05 18:36:36] [Rank 0] step:9881/10000 train_time:404984ms step_avg:40.99ms +[2025-09-05 18:36:36] [Rank 0] step:9881/10000 train_time:404984ms step_avg:40.99ms +[2025-09-05 18:36:36] [Rank 0] step:9901/10000 train_time:405721ms step_avg:40.98ms +[2025-09-05 18:36:36] [Rank 0] step:9901/10000 train_time:405721ms step_avg:40.98ms +[2025-09-05 18:36:37] [Rank 0] step:9921/10000 train_time:406457ms step_avg:40.97ms +[2025-09-05 18:36:37] [Rank 0] step:9921/10000 train_time:406457ms step_avg:40.97ms +[2025-09-05 18:36:38] [Rank 0] step:9941/10000 train_time:407193ms step_avg:40.96ms +[2025-09-05 18:36:38] [Rank 0] step:9941/10000 train_time:407193ms step_avg:40.96ms +[2025-09-05 18:36:39] [Rank 0] step:9961/10000 train_time:407930ms step_avg:40.95ms +[2025-09-05 18:36:39] [Rank 0] step:9961/10000 train_time:407930ms step_avg:40.95ms +[2025-09-05 18:36:39] [Rank 0] step:9981/10000 train_time:408667ms step_avg:40.94ms +[2025-09-05 18:36:39] [Rank 0] step:9981/10000 train_time:408667ms step_avg:40.94ms +[2025-09-05 18:36:40] [Rank 0] step:10000/10000 train_time:409367ms step_avg:40.94ms +[2025-09-05 18:36:40] [Rank 0] step:10000/10000 train_time:409367ms step_avg:40.94ms +[2025-09-05 18:36:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
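The "Fixed-eval set loaded with 1600 samples" line ties back to the fixed_eval_indices.json files added elsewhere in this diff: each maps a group id ("0" through "15") to a list of dataset indices, 100 per group. A minimal sketch of loading and sanity-checking such a file (a hypothetical loader, not the script's code):

import json

# fixed_eval_indices.json: {"0": [idx, ...], ..., "15": [idx, ...]}
with open("fixed_eval_indices.json") as f:
    fixed_eval = json.load(f)

assert len(fixed_eval) == 16
assert all(len(idxs) == 100 for idxs in fixed_eval.values())
print(sum(len(idxs) for idxs in fixed_eval.values()))  # 1600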
+[2025-09-05 18:36:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:36:40] [Rank 0] PRINT: step:10000/10000 train_loss:1.6354 val_loss:1.6244 train_time:409489ms step_avg:40.95ms +[2025-09-05 18:36:40] [Rank 0] PRINT: step:10000/10000 train_loss:1.6354 val_loss:1.6244 train_time:409489ms step_avg:40.95ms +[2025-09-05 18:36:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:36:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:36:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:36:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:38:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:38:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:38:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:38:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:38:02] [Rank 0] Total Loss: 4.1732 +[2025-09-05 18:38:02] [Rank 0] Total Loss: 4.1732 +[2025-09-05 18:38:02] [Rank 0] Total FTA (Unweighted): 0.4825 +[2025-09-05 18:38:02] [Rank 0] Total FTA (Unweighted): 0.4825 +[2025-09-05 18:38:02] [Rank 0] Total FTA (Weighted): 0.4825 +[2025-09-05 18:38:02] [Rank 0] Total FTA (Weighted): 0.4825 +[2025-09-05 18:38:02] [Rank 0] Group 0 Loss: 3.3711 +[2025-09-05 18:38:02] [Rank 0] Group 0 Loss: 3.3711 +[2025-09-05 18:38:02] [Rank 0] Group 1 Loss: 2.9909 +[2025-09-05 18:38:02] [Rank 0] Group 1 Loss: 2.9909 +[2025-09-05 18:38:02] [Rank 0] Group 2 Loss: 3.0902 +[2025-09-05 18:38:02] [Rank 0] Group 2 Loss: 3.0902 +[2025-09-05 18:38:02] [Rank 0] Group 3 Loss: 3.4177 +[2025-09-05 18:38:02] [Rank 0] Group 3 Loss: 3.4177 +[2025-09-05 18:38:02] [Rank 0] Group 4 Loss: 3.6312 +[2025-09-05 18:38:02] [Rank 0] Group 4 Loss: 3.6312 +[2025-09-05 18:38:02] [Rank 0] Group 5 Loss: 3.8690 +[2025-09-05 18:38:02] [Rank 0] Group 5 Loss: 3.8690 +[2025-09-05 18:38:02] [Rank 0] Group 6 Loss: 4.0070 +[2025-09-05 18:38:02] [Rank 0] Group 6 Loss: 4.0070 +[2025-09-05 18:38:02] [Rank 0] Group 7 Loss: 4.2608 +[2025-09-05 18:38:02] [Rank 0] Group 7 Loss: 4.2608 +[2025-09-05 18:38:02] [Rank 0] Group 8 Loss: 4.5627 +[2025-09-05 18:38:02] [Rank 0] Group 8 Loss: 4.5627 +[2025-09-05 18:38:02] [Rank 0] Group 9 Loss: 4.7134 +[2025-09-05 18:38:02] [Rank 0] Group 9 Loss: 4.7134 +[2025-09-05 18:38:02] [Rank 0] Group 10 Loss: 4.8037 +[2025-09-05 18:38:02] [Rank 0] Group 10 Loss: 4.8037 +[2025-09-05 18:38:02] [Rank 0] Group 11 Loss: 4.8161 +[2025-09-05 18:38:02] [Rank 0] Group 11 Loss: 4.8161 +[2025-09-05 18:38:02] [Rank 0] Group 12 Loss: 4.7648 +[2025-09-05 18:38:02] [Rank 0] Group 12 Loss: 4.7648 +[2025-09-05 18:38:02] [Rank 0] Group 13 Loss: 4.8262 +[2025-09-05 18:38:02] [Rank 0] Group 13 Loss: 4.8262 +[2025-09-05 18:38:02] [Rank 0] Group 14 Loss: 4.8583 +[2025-09-05 18:38:02] [Rank 0] Group 14 Loss: 4.8583 +[2025-09-05 18:38:02] [Rank 0] Group 15 Loss: 4.7873 +[2025-09-05 18:38:02] [Rank 0] Group 15 Loss: 4.7873 +[2025-09-05 18:38:02] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:38:02] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:38:02] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:38:02] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:38:02] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:38:02] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:38:02] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 18:38:02] [Rank 0] Group 3 FTA: 0.7300 +[2025-09-05 18:38:02] [Rank 0] Group 4 FTA: 0.5000 
+[2025-09-05 18:38:02] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 18:38:02] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 18:38:02] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 18:38:02] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 18:38:02] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 18:38:02] [Rank 0] Group 7 FTA: 0.3500 +[2025-09-05 18:38:02] [Rank 0] Group 7 FTA: 0.3500 +[2025-09-05 18:38:02] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 18:38:02] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 18:38:02] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 18:38:02] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 18:38:02] [Rank 0] Group 10 FTA: 0.3900 +[2025-09-05 18:38:02] [Rank 0] Group 10 FTA: 0.3900 +[2025-09-05 18:38:02] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 18:38:02] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 18:38:02] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 18:38:02] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 18:38:02] [Rank 0] Group 13 FTA: 0.2900 +[2025-09-05 18:38:02] [Rank 0] Group 13 FTA: 0.2900 +[2025-09-05 18:38:02] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 18:38:02] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 18:38:02] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 18:38:02] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 18:38:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:38:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_loss_curves.png +[2025-09-05 18:38:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:38:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/per_class_acc_curves.png +[2025-09-05 18:38:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:38:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_loss_curve.png +[2025-09-05 18:38:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:38:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_43/total_acc_curve.png +[2025-09-05 18:38:04] [Rank 0] step:10001/10000 train_time:409499ms step_avg:40.95ms +[2025-09-05 18:38:04] [Rank 0] step:10001/10000 train_time:409499ms step_avg:40.95ms +[2025-09-05 18:38:04] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 18:38:04 2025 --- +[2025-09-05 18:38:04] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 18:38:04 2025 --- +[2025-09-05 18:38:04] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB +[2025-09-05 18:38:04] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/config.json new file mode 100644 index 0000000000000000000000000000000000000000..31d157736cf9fad64c42ddf01ab64cbf6dd54d96 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 
44, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.2, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "3c29caa3-376b-4b61-a3f3-5e4108b47c89", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..0e7178d2e215d3776c862a7c132b0ededecbb78c --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa3a83fd094dc46690e18289366e60e74b05d27f0ea64c1209101b12153860ad +size 423695 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..68f7c7defc11496de648479772c23e38811f0902 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:542b33aa74661f368f582176897a243ee7ef28b16bc5f89345bb3c2a04bb290a +size 422952 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..113d22e6b723fc80c1391ba227dff1c3b287a37c --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5655968faf94cfb0d1d281f6f26ff7885f65a4237dc4d1537e0c7003807b2b21 +size 94867 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..a07435b7c68895ef92022f97c5b950eb0d3b8fec --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dadb5fc106f1ef75d1cf5ff9f0912b56cf25bb04bd4d95ab1735ebfe33c1e763 +size 112806 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/training_log_3c29caa3-376b-4b61-a3f3-5e4108b47c89.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/training_log_3c29caa3-376b-4b61-a3f3-5e4108b47c89.txt new file mode 100644 index 0000000000000000000000000000000000000000..9acaa13f99b43e9b9d04777cbd132a9b1824928a --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/training_log_3c29caa3-376b-4b61-a3f3-5e4108b47c89.txt @@ -0,0 +1,5614 @@ +[2025-09-05 18:38:29] [Rank 0] PRINT: --- Script Start: Fri Sep 5 18:38:29 2025 --- +[2025-09-05 18:38:29] [Rank 0] PRINT: --- Script Start: Fri Sep 5 18:38:29 2025 --- +[2025-09-05 18:38:29] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=44, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 18:38:29] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 18:38:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 18:38:29] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 18:38:29] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-05 18:38:29] [Rank 0] PRINT: Using fixed seed: 44 +[2025-09-05 18:38:29] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44 +[2025-09-05 18:38:29] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44 +[2025-09-05 18:38:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
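+# Shard layout assumed by _load_data_shard above (read off its asserts, not an
+# external spec): a 256-word int32 header (1024 bytes) whose first three words
+# are the magic number 20240520, the format version (1), and the token count,
+# followed by num_tokens uint16 tokens -- hence the f.seek(256 * 4) past the
+# header and the nbytes == 2 * num_tokens sanity check on the payload.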
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
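+# Descriptive note on print0 above: the `with open(logfile, "a")` write is
+# repeated (once under the `if logfile:` guard and once after it), so every
+# record -- including the embedded source dump -- is appended to
+# training_log_*.txt twice. That is why each line of this log appears in
+# duplicate.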
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
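+    # Worked example of generate_powerlaw_selection_counts above (illustrative
+    # m=3; this run uses m=15):
+    #   group 0: 1 class   x 2^3 = 8 samples/class
+    #   group 1: 1 class   x 2^2 = 4 samples/class
+    #   group 2: 2 classes x 2^1 = 2 samples/class
+    #   group 3: 4 classes x 2^0 = 1 sample/class
+    # Every group g >= 1 carries the same total mass 2^(m-1); group 0 alone
+    # carries 2^m, giving 2^m + m * 2^(m-1) samples overall (20 for m=3).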
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
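+        # Padding arithmetic used in the loop above (illustrative lengths): a
+        # 50-token QA item is right-padded to ceil(50/128)*128 = 128 tokens
+        # (one flex-attention block); a 300-token item pads to 384 (3 blocks);
+        # anything longer is truncated to max_eval_len = 4096 (32 blocks).
+        # Positions past the real tokens carry target -100, so cross_entropy
+        # ignores them via ignore_index=-100.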
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
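+# Numerical contrast between the two totals returned above (illustrative
+# counts): a head group at 90/100 correct and a tail group at 1/10 give
+#   total_acc_weighted   = (90 + 1) / (100 + 10) ~= 0.83  (head-dominated)
+#   total_acc_unweighted = (0.90 + 0.10) / 2      = 0.50  (each group once)
+# On power-law data the unweighted mean is the more tail-sensitive summary,
+# which is why 'total_acc' is bound to the unweighted variant.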
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
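+    # Reference summary of the mode -> target assignments wired up above, read
+    # off the muon_params_target_list / adam_matrix_target_list pairs (note
+    # that two banners misprint their mode numbers: the mode-13 branch prints
+    # "Mode 32" and the mode-16 branch prints "Mode 15"):
+    #   mode 0 : Muon all attn + all MLP | Adam embeds/head/scalars only
+    #   mode 1 : Muon QK                 | Adam VO + MLP
+    #   mode 2 : Muon VO                 | Adam QK + MLP
+    #   mode 3 : Muon QKVO               | Adam MLP
+    #   mode 4 : Muon MLP                | Adam QKVO
+    #   mode 5 : no Muon                 | Adam all matrices
+    #   mode 6 : Muon W_2                | Adam attn + W_1
+    #   mode 7 : Muon VO + MLP           | Adam QK
+    #   mode 8 : Muon VO + W_2           | Adam QK + W_1
+    #   mode 9 : plain SGD+momentum on every parameter (no Adam/Muon)
+    #   mode 10: Muon W_O + MLP          | Adam V + QK
+    #   mode 13: Muon W_O + W_2          | Adam QK + V + W_1
+    #   mode 14: Muon W_O                | Adam QK + V + MLP
+    #   mode 15: Muon W_V                | Adam QK + W_O + MLP
+    #   mode 16: Muon QKV                | Adam W_O + MLP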
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
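+    # The mode table is the same as in the qkvo branch above; the one
+    # difference in this gated branch is the MLP split: mlp_w1_group is
+    # c_fc + c_up (both input-side projections of the gated MLP) and
+    # all_mlp_matrices also includes c_up, so modes 6, 8 and 13 hand only
+    # W_2 (c_proj) to Muon while Adam covers both W_1 matrices.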
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
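+    # Illustrative values of get_lr above under this run's settings
+    # (num_iterations=10000, cooldown_frac=0.8):
+    #   steps 0..1999 : x < 0.2 -> multiplier 1.0 (stable phase)
+    #   step  6000    : x = 0.6, w = 0.4/0.8 = 0.5 -> 0.5*1.0 + 0.5*0.1 = 0.55
+    #   step 10000    : x = 1.0, w = 0 -> multiplier 0.1 (end of decay)
+    # The training loop below sets each group's lr to initial_lr times this
+    # multiplier; on the same progress x the attention window grows from 128
+    # tokens up to next_multiple_of_n(1728, n=128) = 1792.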
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
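+    # Shape of the index file written here (it is the fixed_eval_indices.json
+    # committed with each run in this diff): a JSON object mapping group-id
+    # strings to at most PER_GROUP_K (=100 in this run) line offsets into the
+    # QA jsonl, e.g. {"0": [...], "1": [...], ...}. Because an existing file
+    # is reloaded rather than rebuilt, the evaluation set stays frozen across
+    # steps and across restarts of the same run directory.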
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
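+            # Sizing of the validation pass above (world sizes illustrative):
+            # with val_tokens=491520 and val_seq_len=16384, val_batch_size =
+            # world_size * 16384, so one GPU runs 491520 / 16384 = 30 val
+            # steps per evaluation, while eight GPUs give val_batch_size =
+            # 131072 and 491520 // 131072 = 3 steps, tripping the
+            # divisibility warning above for the leftover tokens.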
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
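+        # Shape of the history dict rendered into the four *.png curves above
+        # (step keys and values illustrative):
+        #   history['per_class_loss'] = {"3": {"500": 6.9, "1000": 5.4, ...}, ...}
+        #   history['total_acc']      = {"500": 0.01, "1000": 0.02, ...}
+        # plot_curves branches on whether the top-level values are dicts (one
+        # viridis-coloured curve per class group) or scalars (a single curve).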
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 18:38:29] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
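# -----------------------------------------------------------------------------
# The training section above ramps the Muon optimizer's momentum linearly from
# 0.85 to 0.95 over the first 300 steps. A self-contained sketch of that ramp
# (the helper name `muon_momentum` is ours):
def muon_momentum(step: int, warmup_steps: int = 300,
                  start: float = 0.85, end: float = 0.95) -> float:
    frac = min(step / warmup_steps, 1.0)
    return (1 - frac) * start + frac * end

assert muon_momentum(0) == 0.85
assert abs(muon_momentum(150) - 0.90) < 1e-9
assert muon_momentum(300) == muon_momentum(10000) == 0.95
# -----------------------------------------------------------------------------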
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
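# -----------------------------------------------------------------------------
# `_load_data_shard` above expects a 256-int32 header (magic 20240520, version
# 1, token count) followed by raw uint16 token ids. A minimal sketch that
# writes a tiny compatible shard and re-checks it the same way (the file name
# `toy_shard.bin` is ours):
import numpy as np
import torch
from pathlib import Path

def write_shard(path: Path, tokens: np.ndarray) -> None:
    header = np.zeros(256, dtype=np.int32)
    header[0], header[1], header[2] = 20240520, 1, len(tokens)
    with path.open("wb") as f:
        f.write(header.tobytes())
        f.write(tokens.astype(np.uint16).tobytes())

shard = Path("toy_shard.bin")
write_shard(shard, np.arange(1000, dtype=np.uint16))
header = torch.from_file(str(shard), False, 256, dtype=torch.int32)
assert header[0] == 20240520 and header[1] == 1 and int(header[2]) == 1000
# -----------------------------------------------------------------------------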
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message 
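# -----------------------------------------------------------------------------
# Note: the two back-to-back `f.write(...)` calls around this point mean that
# whenever `logfile` is set, `print0` appends every message to the log twice.
# A deduplicated sketch of the same helper (the single-write fix is ours):
def print0_fixed(s, console=False, *, master=True, logfile_path=None, rank=0):
    if not master:
        return
    import time
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    log_message = f"[{timestamp}] [Rank {rank}] {s}"
    if console or s.startswith("PRINT:"):
        print(s[6:] if s.startswith("PRINT:") else s)
    if logfile_path:
        with open(logfile_path, "a") as f:
            f.write(log_message + "\n")   # written exactly once
# -----------------------------------------------------------------------------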
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
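# -----------------------------------------------------------------------------
# Two details of the loop above, restated on dummy tensors: sequences are
# padded up to a multiple of BLOCK_SIZE=128 (the FlexAttention block size),
# and first-token accuracy (FTA) compares the argmax at the last prompt
# position against the first token of " <answer>". The values below are made
# up for illustration:
import torch

original_len = 200
BLOCK_SIZE = 128
padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
assert padded_len == 256                       # ceil to the next block

logits = torch.zeros(1, padded_len, 50257)     # (batch, seq, vocab)
prompt_tokens_len, expected_token = 5, 1234
logits[0, prompt_tokens_len - 1, expected_token] = 10.0

predicted = torch.argmax(logits.squeeze(0)[prompt_tokens_len - 1, :]).item()
assert predicted == expected_token             # one FTA hit for this group
# -----------------------------------------------------------------------------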
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
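# -----------------------------------------------------------------------------
# The two "total FTA" numbers above answer different questions: the weighted
# one pools all samples, while the unweighted one averages per-group
# accuracies, so rare tail groups count as much as the head. A made-up example
# where the two diverge sharply:
group_correct = {"0": 90, "1": 1}    # head group vs. tail group
group_total   = {"0": 100, "1": 10}

weighted = sum(group_correct.values()) / sum(group_total.values())
per_group = {g: group_correct[g] / group_total[g] for g in group_total}
unweighted = sum(per_group.values()) / len(per_group)

assert abs(weighted - 91 / 110) < 1e-9    # ~0.827, dominated by the head
assert abs(unweighted - 0.5) < 1e-9       # (0.9 + 0.1) / 2
# -----------------------------------------------------------------------------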
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
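# -----------------------------------------------------------------------------
# The stratified sampler above shrinks the eval set to roughly num_samples
# items while the max(1, ...) floor guarantees every class keeps at least one
# item. The same logic over toy data (class ids and sizes are made up):
import random
from collections import defaultdict

random.seed(0)
qa_data = [{"class_id": i % 5} for i in range(1000)]
num_samples = 100

data_by_class = defaultdict(list)
for item in qa_data:
    data_by_class[item["class_id"]].append(item)

ratio = num_samples / len(qa_data)
sample = []
for cls, items in data_by_class.items():
    k = max(1, int(len(items) * ratio))
    sample.extend(random.sample(items, min(len(items), k)))

assert len(sample) == 100                 # 5 classes x 20 items each here
# -----------------------------------------------------------------------------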
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
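# -----------------------------------------------------------------------------
# The collection below sorts every parameter into matrix groups plus a
# catch-all `scalar_params`, then asserts nothing with ndim >= 2 leaked into
# the scalars. The same invariant checked on a toy module (identity tracked
# via id(), which is what set membership on Parameters amounts to):
from torch import nn

toy = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
matrices = [p for p in toy.parameters() if p.ndim >= 2]
matrix_ids = {id(p) for p in matrices}
scalars = [p for p in toy.parameters() if id(p) not in matrix_ids]

assert all(p.ndim < 2 for p in scalars)              # biases, norm gains
assert len(matrices) + len(scalars) == len(list(toy.parameters()))
# -----------------------------------------------------------------------------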
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
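# -----------------------------------------------------------------------------
# Muon's parameter list above is flattened and de-duplicated by id() so a
# tensor that appears in two logical groups is optimized exactly once. A
# minimal sketch of that pattern (the helper name `flatten_unique` is ours):
import torch

def flatten_unique(groups):
    out, seen = [], set()
    for entry in groups:
        for p in (entry if isinstance(entry, list) else [entry]):
            if p is not None and id(p) not in seen:
                out.append(p)
                seen.add(id(p))
    return out

w = torch.nn.Parameter(torch.randn(4, 4))
assert len(flatten_unique([[w], [w, None]])) == 1    # shared tensor kept once
# -----------------------------------------------------------------------------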
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
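# -----------------------------------------------------------------------------
# The schedule above holds the learning-rate multiplier at 1.0 for the first
# (1 - cooldown_frac) of training, then decays linearly down to 0.1x. With
# num_iterations=10000 and cooldown_frac=0.8 the decay starts at step 2000:
def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)    # clamped progress
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1

assert lr_multiplier(0) == lr_multiplier(1999) == 1.0
assert abs(lr_multiplier(6000) - 0.55) < 1e-9
assert abs(lr_multiplier(10000) - 0.10) < 1e-9
# -----------------------------------------------------------------------------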
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
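# -----------------------------------------------------------------------------
# The fixed eval set above draws up to PER_GROUP_K line indices per group with
# a dedicated seeded RNG and stores them as JSON, so every later evaluation
# scores exactly the same examples. The core of that sampler over toy buckets
# (bucket contents are made up; the seed matches the script's default):
import json
import random
from collections import defaultdict

rng = random.Random(2025)
buckets = defaultdict(list)
for line_idx in range(1000):          # pretend line i belongs to group i % 4
    buckets[line_idx % 4].append(line_idx)

per_group_k = 100
fixed = {str(g): (arr[:] if len(arr) <= per_group_k else rng.sample(arr, per_group_k))
         for g, arr in buckets.items()}

assert all(len(v) == per_group_k for v in fixed.values())
print(json.dumps(sorted(fixed)))      # "0".."3", as in fixed_eval_indices.json
# -----------------------------------------------------------------------------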
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
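+    # Schedule sketch (comments only): the ramp above is equivalent to
+    #     momentum(step) = 0.85 + 0.10 * min(step / 300, 1)
+    # so momentum(0) = 0.85, momentum(150) = 0.90, and momentum(step >= 300) = 0.95.
+    # The per-step factor from get_lr(step) (defined earlier) rescales every
+    # group's initial_lr; with cooldown_frac = 0.8 it presumably stays at 1.0 for
+    # the first ~20% of num_iterations and then anneals over the remaining steps.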
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 18:38:29] [Rank 0] PRINT: Constructing model...
+[2025-09-05 18:38:30] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 18:38:30] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 18:38:30] [Rank 0] PRINT: Testing model forward function: +[2025-09-05 18:38:30] [Rank 0] PRINT: Testing model forward function: +[2025-09-05 18:38:34] [Rank 0] PRINT: Model test - Result type: +[2025-09-05 18:38:34] [Rank 0] PRINT: Model test - Result type: +[2025-09-05 18:38:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-05 18:38:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-09-05 18:38:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-05 18:38:34] [Rank 0] PRINT: Saved original model reference for inference. +[2025-09-05 18:38:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-05 18:38:34] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-09-05 18:38:34] [Rank 0] PRINT: Model returns: +[2025-09-05 18:38:34] [Rank 0] PRINT: Model returns: +[2025-09-05 18:38:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-05 18:38:34] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-09-05 18:38:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9 +[2025-09-05 18:38:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9 +[2025-09-05 18:38:34] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.2). +[2025-09-05 18:38:34] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.2). +[2025-09-05 18:38:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-09-05 18:38:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-09-05 18:38:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-05 18:38:34] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-09-05 18:38:39] [Rank 0] PRINT: Model compilation complete. +[2025-09-05 18:38:39] [Rank 0] PRINT: Model compilation complete. +[2025-09-05 18:38:39] [Rank 0] PRINT: Starting warmup... +[2025-09-05 18:38:39] [Rank 0] PRINT: Starting warmup... +[2025-09-05 18:39:19] [Rank 0] PRINT: Warmup complete. +[2025-09-05 18:39:19] [Rank 0] PRINT: Warmup complete. +[2025-09-05 18:39:19] [Rank 0] PRINT: Starting training... +[2025-09-05 18:39:19] [Rank 0] PRINT: Starting training... +[2025-09-05 18:39:26] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/fixed_eval_indices.json +[2025-09-05 18:39:26] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/fixed_eval_indices.json +[2025-09-05 18:39:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:39:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 18:39:29] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-05 18:39:29] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-05 18:40:02] [Rank 0] step:21/10000 train_time:32909ms step_avg:1567.07ms +[2025-09-05 18:40:02] [Rank 0] step:21/10000 train_time:32909ms step_avg:1567.07ms +[2025-09-05 18:40:03] [Rank 0] step:41/10000 train_time:33635ms step_avg:820.38ms +[2025-09-05 18:40:03] [Rank 0] step:41/10000 train_time:33635ms step_avg:820.38ms +[2025-09-05 18:40:04] [Rank 0] step:61/10000 train_time:34361ms step_avg:563.29ms +[2025-09-05 18:40:04] [Rank 0] step:61/10000 train_time:34361ms step_avg:563.29ms +[2025-09-05 18:40:05] [Rank 0] step:81/10000 train_time:35086ms step_avg:433.16ms +[2025-09-05 18:40:05] [Rank 0] step:81/10000 train_time:35086ms step_avg:433.16ms +[2025-09-05 18:40:05] [Rank 0] step:101/10000 train_time:35812ms step_avg:354.57ms +[2025-09-05 18:40:05] [Rank 0] step:101/10000 train_time:35812ms step_avg:354.57ms +[2025-09-05 18:40:06] [Rank 0] step:121/10000 train_time:36538ms step_avg:301.96ms +[2025-09-05 18:40:06] [Rank 0] step:121/10000 train_time:36538ms step_avg:301.96ms +[2025-09-05 18:40:07] [Rank 0] step:141/10000 train_time:37263ms step_avg:264.28ms +[2025-09-05 18:40:07] [Rank 0] step:141/10000 train_time:37263ms step_avg:264.28ms +[2025-09-05 18:40:07] [Rank 0] step:161/10000 train_time:37990ms step_avg:235.96ms +[2025-09-05 18:40:07] [Rank 0] step:161/10000 train_time:37990ms step_avg:235.96ms +[2025-09-05 18:40:08] [Rank 0] step:181/10000 train_time:38716ms step_avg:213.90ms +[2025-09-05 18:40:08] [Rank 0] step:181/10000 train_time:38716ms step_avg:213.90ms +[2025-09-05 18:40:09] [Rank 0] step:201/10000 train_time:39442ms step_avg:196.23ms +[2025-09-05 18:40:09] [Rank 0] step:201/10000 train_time:39442ms step_avg:196.23ms +[2025-09-05 18:40:10] [Rank 0] step:221/10000 train_time:40167ms step_avg:181.75ms +[2025-09-05 18:40:10] [Rank 0] step:221/10000 train_time:40167ms step_avg:181.75ms +[2025-09-05 18:40:10] [Rank 0] step:241/10000 train_time:40892ms step_avg:169.68ms +[2025-09-05 18:40:10] [Rank 0] step:241/10000 train_time:40892ms step_avg:169.68ms +[2025-09-05 18:40:11] [Rank 0] step:261/10000 train_time:41618ms step_avg:159.46ms +[2025-09-05 18:40:11] [Rank 0] step:261/10000 train_time:41618ms step_avg:159.46ms +[2025-09-05 18:40:12] [Rank 0] step:281/10000 train_time:42344ms step_avg:150.69ms +[2025-09-05 18:40:12] [Rank 0] step:281/10000 train_time:42344ms step_avg:150.69ms +[2025-09-05 18:40:13] [Rank 0] step:301/10000 train_time:43070ms step_avg:143.09ms +[2025-09-05 18:40:13] [Rank 0] step:301/10000 train_time:43070ms step_avg:143.09ms +[2025-09-05 18:40:13] [Rank 0] step:321/10000 train_time:43795ms step_avg:136.43ms +[2025-09-05 18:40:13] [Rank 0] step:321/10000 train_time:43795ms step_avg:136.43ms +[2025-09-05 18:40:14] [Rank 0] step:341/10000 train_time:44521ms step_avg:130.56ms +[2025-09-05 18:40:14] [Rank 0] step:341/10000 train_time:44521ms step_avg:130.56ms +[2025-09-05 18:40:15] [Rank 0] step:361/10000 train_time:45246ms step_avg:125.34ms +[2025-09-05 18:40:15] [Rank 0] step:361/10000 train_time:45246ms step_avg:125.34ms +[2025-09-05 18:40:15] [Rank 0] step:381/10000 train_time:45971ms step_avg:120.66ms +[2025-09-05 18:40:15] [Rank 0] step:381/10000 train_time:45971ms step_avg:120.66ms +[2025-09-05 18:40:16] [Rank 0] step:401/10000 train_time:46750ms step_avg:116.58ms +[2025-09-05 18:40:16] [Rank 0] step:401/10000 train_time:46750ms step_avg:116.58ms +[2025-09-05 18:40:17] [Rank 0] 
step:421/10000 train_time:47474ms step_avg:112.77ms +[2025-09-05 18:40:17] [Rank 0] step:421/10000 train_time:47474ms step_avg:112.77ms +[2025-09-05 18:40:18] [Rank 0] step:441/10000 train_time:48199ms step_avg:109.30ms +[2025-09-05 18:40:18] [Rank 0] step:441/10000 train_time:48199ms step_avg:109.30ms +[2025-09-05 18:40:18] [Rank 0] step:461/10000 train_time:48925ms step_avg:106.13ms +[2025-09-05 18:40:18] [Rank 0] step:461/10000 train_time:48925ms step_avg:106.13ms +[2025-09-05 18:40:19] [Rank 0] step:481/10000 train_time:49650ms step_avg:103.22ms +[2025-09-05 18:40:19] [Rank 0] step:481/10000 train_time:49650ms step_avg:103.22ms +[2025-09-05 18:40:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:40:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:40:20] [Rank 0] PRINT: step:500/10000 train_loss:4.6523 val_loss:3.2264 train_time:50455ms step_avg:100.91ms +[2025-09-05 18:40:20] [Rank 0] PRINT: step:500/10000 train_loss:4.6523 val_loss:3.2264 train_time:50455ms step_avg:100.91ms +[2025-09-05 18:40:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:40:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:40:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:40:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:41:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:41:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:41:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:41:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:41:41] [Rank 0] Total Loss: 5.2870 +[2025-09-05 18:41:41] [Rank 0] Total Loss: 5.2870 +[2025-09-05 18:41:41] [Rank 0] Total FTA (Unweighted): 0.1306 +[2025-09-05 18:41:41] [Rank 0] Total FTA (Unweighted): 0.1306 +[2025-09-05 18:41:41] [Rank 0] Total FTA (Weighted): 0.1306 +[2025-09-05 18:41:41] [Rank 0] Total FTA (Weighted): 0.1306 +[2025-09-05 18:41:41] [Rank 0] Group 0 Loss: 3.2237 +[2025-09-05 18:41:41] [Rank 0] Group 0 Loss: 3.2237 +[2025-09-05 18:41:41] [Rank 0] Group 1 Loss: 3.1875 +[2025-09-05 18:41:41] [Rank 0] Group 1 Loss: 3.1875 +[2025-09-05 18:41:41] [Rank 0] Group 2 Loss: 3.4784 +[2025-09-05 18:41:41] [Rank 0] Group 2 Loss: 3.4784 +[2025-09-05 18:41:41] [Rank 0] Group 3 Loss: 4.1340 +[2025-09-05 18:41:41] [Rank 0] Group 3 Loss: 4.1340 +[2025-09-05 18:41:41] [Rank 0] Group 4 Loss: 5.0152 +[2025-09-05 18:41:41] [Rank 0] Group 4 Loss: 5.0152 +[2025-09-05 18:41:41] [Rank 0] Group 5 Loss: 5.4308 +[2025-09-05 18:41:41] [Rank 0] Group 5 Loss: 5.4308 +[2025-09-05 18:41:41] [Rank 0] Group 6 Loss: 5.7094 +[2025-09-05 18:41:41] [Rank 0] Group 6 Loss: 5.7094 +[2025-09-05 18:41:41] [Rank 0] Group 7 Loss: 5.7687 +[2025-09-05 18:41:41] [Rank 0] Group 7 Loss: 5.7687 +[2025-09-05 18:41:41] [Rank 0] Group 8 Loss: 5.9770 +[2025-09-05 18:41:41] [Rank 0] Group 8 Loss: 5.9770 +[2025-09-05 18:41:41] [Rank 0] Group 9 Loss: 6.1533 +[2025-09-05 18:41:41] [Rank 0] Group 9 Loss: 6.1533 +[2025-09-05 18:41:41] [Rank 0] Group 10 Loss: 6.1075 +[2025-09-05 18:41:41] [Rank 0] Group 10 Loss: 6.1075 +[2025-09-05 18:41:41] [Rank 0] Group 11 Loss: 6.1913 +[2025-09-05 18:41:41] [Rank 0] Group 11 Loss: 6.1913 +[2025-09-05 18:41:41] [Rank 0] Group 12 Loss: 6.0306 +[2025-09-05 18:41:41] [Rank 0] Group 12 Loss: 6.0306 
+[2025-09-05 18:41:41] [Rank 0] Group 13 Loss: 6.0326 +[2025-09-05 18:41:41] [Rank 0] Group 13 Loss: 6.0326 +[2025-09-05 18:41:41] [Rank 0] Group 14 Loss: 6.1164 +[2025-09-05 18:41:41] [Rank 0] Group 14 Loss: 6.1164 +[2025-09-05 18:41:41] [Rank 0] Group 15 Loss: 6.0357 +[2025-09-05 18:41:41] [Rank 0] Group 15 Loss: 6.0357 +[2025-09-05 18:41:41] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-05 18:41:41] [Rank 0] Group 0 FTA: 0.2500 +[2025-09-05 18:41:42] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-05 18:41:42] [Rank 0] Group 1 FTA: 0.2000 +[2025-09-05 18:41:42] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 18:41:42] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 18:41:42] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 18:41:42] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 18:41:42] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-05 18:41:42] [Rank 0] Group 4 FTA: 0.1300 +[2025-09-05 18:41:42] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 18:41:42] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 18:41:42] [Rank 0] Group 6 FTA: 0.0800 +[2025-09-05 18:41:42] [Rank 0] Group 6 FTA: 0.0800 +[2025-09-05 18:41:42] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 18:41:42] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 18:41:42] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-05 18:41:42] [Rank 0] Group 8 FTA: 0.1400 +[2025-09-05 18:41:42] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-05 18:41:42] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-05 18:41:42] [Rank 0] Group 10 FTA: 0.0800 +[2025-09-05 18:41:42] [Rank 0] Group 10 FTA: 0.0800 +[2025-09-05 18:41:42] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 18:41:42] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 18:41:42] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 18:41:42] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 18:41:42] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 18:41:42] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 18:41:42] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 18:41:42] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 18:41:42] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 18:41:42] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 18:41:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:41:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:41:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:41:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:41:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:41:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:41:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:41:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:41:43] [Rank 0] step:501/10000 train_time:50464ms step_avg:100.73ms +[2025-09-05 18:41:43] [Rank 0] step:501/10000 train_time:50464ms step_avg:100.73ms +[2025-09-05 
18:41:44] [Rank 0] step:521/10000 train_time:51135ms step_avg:98.15ms +[2025-09-05 18:41:44] [Rank 0] step:521/10000 train_time:51135ms step_avg:98.15ms +[2025-09-05 18:41:45] [Rank 0] step:541/10000 train_time:51860ms step_avg:95.86ms +[2025-09-05 18:41:45] [Rank 0] step:541/10000 train_time:51860ms step_avg:95.86ms +[2025-09-05 18:41:45] [Rank 0] step:561/10000 train_time:52586ms step_avg:93.74ms +[2025-09-05 18:41:45] [Rank 0] step:561/10000 train_time:52586ms step_avg:93.74ms +[2025-09-05 18:41:46] [Rank 0] step:581/10000 train_time:53313ms step_avg:91.76ms +[2025-09-05 18:41:46] [Rank 0] step:581/10000 train_time:53313ms step_avg:91.76ms +[2025-09-05 18:41:47] [Rank 0] step:601/10000 train_time:54037ms step_avg:89.91ms +[2025-09-05 18:41:47] [Rank 0] step:601/10000 train_time:54037ms step_avg:89.91ms +[2025-09-05 18:41:48] [Rank 0] step:621/10000 train_time:54763ms step_avg:88.18ms +[2025-09-05 18:41:48] [Rank 0] step:621/10000 train_time:54763ms step_avg:88.18ms +[2025-09-05 18:41:48] [Rank 0] step:641/10000 train_time:55488ms step_avg:86.56ms +[2025-09-05 18:41:48] [Rank 0] step:641/10000 train_time:55488ms step_avg:86.56ms +[2025-09-05 18:41:49] [Rank 0] step:661/10000 train_time:56213ms step_avg:85.04ms +[2025-09-05 18:41:49] [Rank 0] step:661/10000 train_time:56213ms step_avg:85.04ms +[2025-09-05 18:41:50] [Rank 0] step:681/10000 train_time:56938ms step_avg:83.61ms +[2025-09-05 18:41:50] [Rank 0] step:681/10000 train_time:56938ms step_avg:83.61ms +[2025-09-05 18:41:51] [Rank 0] step:701/10000 train_time:57665ms step_avg:82.26ms +[2025-09-05 18:41:51] [Rank 0] step:701/10000 train_time:57665ms step_avg:82.26ms +[2025-09-05 18:41:51] [Rank 0] step:721/10000 train_time:58389ms step_avg:80.98ms +[2025-09-05 18:41:51] [Rank 0] step:721/10000 train_time:58389ms step_avg:80.98ms +[2025-09-05 18:41:52] [Rank 0] step:741/10000 train_time:59115ms step_avg:79.78ms +[2025-09-05 18:41:52] [Rank 0] step:741/10000 train_time:59115ms step_avg:79.78ms +[2025-09-05 18:41:53] [Rank 0] step:761/10000 train_time:59845ms step_avg:78.64ms +[2025-09-05 18:41:53] [Rank 0] step:761/10000 train_time:59845ms step_avg:78.64ms +[2025-09-05 18:41:53] [Rank 0] step:781/10000 train_time:60576ms step_avg:77.56ms +[2025-09-05 18:41:53] [Rank 0] step:781/10000 train_time:60576ms step_avg:77.56ms +[2025-09-05 18:41:54] [Rank 0] step:801/10000 train_time:61307ms step_avg:76.54ms +[2025-09-05 18:41:54] [Rank 0] step:801/10000 train_time:61307ms step_avg:76.54ms +[2025-09-05 18:41:56] [Rank 0] step:821/10000 train_time:62668ms step_avg:76.33ms +[2025-09-05 18:41:56] [Rank 0] step:821/10000 train_time:62668ms step_avg:76.33ms +[2025-09-05 18:41:56] [Rank 0] step:841/10000 train_time:63399ms step_avg:75.38ms +[2025-09-05 18:41:56] [Rank 0] step:841/10000 train_time:63399ms step_avg:75.38ms +[2025-09-05 18:41:57] [Rank 0] step:861/10000 train_time:64129ms step_avg:74.48ms +[2025-09-05 18:41:57] [Rank 0] step:861/10000 train_time:64129ms step_avg:74.48ms +[2025-09-05 18:41:58] [Rank 0] step:881/10000 train_time:64859ms step_avg:73.62ms +[2025-09-05 18:41:58] [Rank 0] step:881/10000 train_time:64859ms step_avg:73.62ms +[2025-09-05 18:41:58] [Rank 0] step:901/10000 train_time:65589ms step_avg:72.80ms +[2025-09-05 18:41:58] [Rank 0] step:901/10000 train_time:65589ms step_avg:72.80ms +[2025-09-05 18:41:59] [Rank 0] step:921/10000 train_time:66320ms step_avg:72.01ms +[2025-09-05 18:41:59] [Rank 0] step:921/10000 train_time:66320ms step_avg:72.01ms +[2025-09-05 18:42:00] [Rank 0] step:941/10000 train_time:67050ms 
step_avg:71.25ms +[2025-09-05 18:42:00] [Rank 0] step:941/10000 train_time:67050ms step_avg:71.25ms +[2025-09-05 18:42:01] [Rank 0] step:961/10000 train_time:67781ms step_avg:70.53ms +[2025-09-05 18:42:01] [Rank 0] step:961/10000 train_time:67781ms step_avg:70.53ms +[2025-09-05 18:42:01] [Rank 0] step:981/10000 train_time:68512ms step_avg:69.84ms +[2025-09-05 18:42:01] [Rank 0] step:981/10000 train_time:68512ms step_avg:69.84ms +[2025-09-05 18:42:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:42:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:42:03] [Rank 0] PRINT: step:1000/10000 train_loss:2.8758 val_loss:2.5938 train_time:69323ms step_avg:69.32ms +[2025-09-05 18:42:03] [Rank 0] PRINT: step:1000/10000 train_loss:2.8758 val_loss:2.5938 train_time:69323ms step_avg:69.32ms +[2025-09-05 18:42:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:42:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:42:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:42:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:43:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:43:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:43:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:43:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:43:23] [Rank 0] Total Loss: 4.7621 +[2025-09-05 18:43:23] [Rank 0] Total Loss: 4.7621 +[2025-09-05 18:43:23] [Rank 0] Total FTA (Unweighted): 0.2062 +[2025-09-05 18:43:23] [Rank 0] Total FTA (Unweighted): 0.2062 +[2025-09-05 18:43:23] [Rank 0] Total FTA (Weighted): 0.2062 +[2025-09-05 18:43:23] [Rank 0] Total FTA (Weighted): 0.2062 +[2025-09-05 18:43:23] [Rank 0] Group 0 Loss: 3.1144 +[2025-09-05 18:43:23] [Rank 0] Group 0 Loss: 3.1144 +[2025-09-05 18:43:23] [Rank 0] Group 1 Loss: 3.0768 +[2025-09-05 18:43:23] [Rank 0] Group 1 Loss: 3.0768 +[2025-09-05 18:43:23] [Rank 0] Group 2 Loss: 3.1448 +[2025-09-05 18:43:23] [Rank 0] Group 2 Loss: 3.1448 +[2025-09-05 18:43:23] [Rank 0] Group 3 Loss: 3.5703 +[2025-09-05 18:43:23] [Rank 0] Group 3 Loss: 3.5703 +[2025-09-05 18:43:23] [Rank 0] Group 4 Loss: 4.1089 +[2025-09-05 18:43:23] [Rank 0] Group 4 Loss: 4.1089 +[2025-09-05 18:43:23] [Rank 0] Group 5 Loss: 4.6597 +[2025-09-05 18:43:23] [Rank 0] Group 5 Loss: 4.6597 +[2025-09-05 18:43:23] [Rank 0] Group 6 Loss: 4.9808 +[2025-09-05 18:43:23] [Rank 0] Group 6 Loss: 4.9808 +[2025-09-05 18:43:23] [Rank 0] Group 7 Loss: 5.1325 +[2025-09-05 18:43:23] [Rank 0] Group 7 Loss: 5.1325 +[2025-09-05 18:43:23] [Rank 0] Group 8 Loss: 5.4269 +[2025-09-05 18:43:23] [Rank 0] Group 8 Loss: 5.4269 +[2025-09-05 18:43:23] [Rank 0] Group 9 Loss: 5.5662 +[2025-09-05 18:43:23] [Rank 0] Group 9 Loss: 5.5662 +[2025-09-05 18:43:23] [Rank 0] Group 10 Loss: 5.6000 +[2025-09-05 18:43:23] [Rank 0] Group 10 Loss: 5.6000 +[2025-09-05 18:43:23] [Rank 0] Group 11 Loss: 5.6407 +[2025-09-05 18:43:23] [Rank 0] Group 11 Loss: 5.6407 +[2025-09-05 18:43:23] [Rank 0] Group 12 Loss: 5.5097 +[2025-09-05 18:43:23] [Rank 0] Group 12 Loss: 5.5097 +[2025-09-05 18:43:23] [Rank 0] Group 13 Loss: 5.5222 +[2025-09-05 18:43:23] [Rank 0] Group 13 Loss: 5.5222 +[2025-09-05 18:43:23] [Rank 0] Group 14 Loss: 5.6138 +[2025-09-05 18:43:23] [Rank 0] Group 14 Loss: 5.6138 
+[2025-09-05 18:43:23] [Rank 0] Group 15 Loss: 5.5265 +[2025-09-05 18:43:23] [Rank 0] Group 15 Loss: 5.5265 +[2025-09-05 18:43:23] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:43:23] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:43:23] [Rank 0] Group 1 FTA: 0.5200 +[2025-09-05 18:43:23] [Rank 0] Group 1 FTA: 0.5200 +[2025-09-05 18:43:24] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 18:43:24] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 18:43:24] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 18:43:24] [Rank 0] Group 3 FTA: 0.1100 +[2025-09-05 18:43:24] [Rank 0] Group 4 FTA: 0.1900 +[2025-09-05 18:43:24] [Rank 0] Group 4 FTA: 0.1900 +[2025-09-05 18:43:24] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 18:43:24] [Rank 0] Group 5 FTA: 0.1800 +[2025-09-05 18:43:24] [Rank 0] Group 6 FTA: 0.1000 +[2025-09-05 18:43:24] [Rank 0] Group 6 FTA: 0.1000 +[2025-09-05 18:43:24] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 18:43:24] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 18:43:24] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-05 18:43:24] [Rank 0] Group 8 FTA: 0.1900 +[2025-09-05 18:43:24] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 18:43:24] [Rank 0] Group 9 FTA: 0.1100 +[2025-09-05 18:43:24] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-05 18:43:24] [Rank 0] Group 10 FTA: 0.1200 +[2025-09-05 18:43:24] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-05 18:43:24] [Rank 0] Group 11 FTA: 0.1000 +[2025-09-05 18:43:24] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 18:43:24] [Rank 0] Group 12 FTA: 0.0900 +[2025-09-05 18:43:24] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 18:43:24] [Rank 0] Group 13 FTA: 0.1200 +[2025-09-05 18:43:24] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 18:43:24] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 18:43:24] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 18:43:24] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 18:43:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:43:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:43:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:43:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:43:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:43:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:43:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:43:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:43:25] [Rank 0] step:1001/10000 train_time:69332ms step_avg:69.26ms +[2025-09-05 18:43:25] [Rank 0] step:1001/10000 train_time:69332ms step_avg:69.26ms +[2025-09-05 18:43:26] [Rank 0] step:1021/10000 train_time:70004ms step_avg:68.56ms +[2025-09-05 18:43:26] [Rank 0] step:1021/10000 train_time:70004ms step_avg:68.56ms +[2025-09-05 18:43:27] [Rank 0] step:1041/10000 train_time:70735ms 
step_avg:67.95ms +[2025-09-05 18:43:27] [Rank 0] step:1041/10000 train_time:70735ms step_avg:67.95ms +[2025-09-05 18:43:28] [Rank 0] step:1061/10000 train_time:71465ms step_avg:67.36ms +[2025-09-05 18:43:28] [Rank 0] step:1061/10000 train_time:71465ms step_avg:67.36ms +[2025-09-05 18:43:28] [Rank 0] step:1081/10000 train_time:72196ms step_avg:66.79ms +[2025-09-05 18:43:28] [Rank 0] step:1081/10000 train_time:72196ms step_avg:66.79ms +[2025-09-05 18:43:29] [Rank 0] step:1101/10000 train_time:72926ms step_avg:66.24ms +[2025-09-05 18:43:29] [Rank 0] step:1101/10000 train_time:72926ms step_avg:66.24ms +[2025-09-05 18:43:30] [Rank 0] step:1121/10000 train_time:73657ms step_avg:65.71ms +[2025-09-05 18:43:30] [Rank 0] step:1121/10000 train_time:73657ms step_avg:65.71ms +[2025-09-05 18:43:31] [Rank 0] step:1141/10000 train_time:74388ms step_avg:65.20ms +[2025-09-05 18:43:31] [Rank 0] step:1141/10000 train_time:74388ms step_avg:65.20ms +[2025-09-05 18:43:31] [Rank 0] step:1161/10000 train_time:75118ms step_avg:64.70ms +[2025-09-05 18:43:31] [Rank 0] step:1161/10000 train_time:75118ms step_avg:64.70ms +[2025-09-05 18:43:32] [Rank 0] step:1181/10000 train_time:75849ms step_avg:64.22ms +[2025-09-05 18:43:32] [Rank 0] step:1181/10000 train_time:75849ms step_avg:64.22ms +[2025-09-05 18:43:33] [Rank 0] step:1201/10000 train_time:76580ms step_avg:63.76ms +[2025-09-05 18:43:33] [Rank 0] step:1201/10000 train_time:76580ms step_avg:63.76ms +[2025-09-05 18:43:33] [Rank 0] step:1221/10000 train_time:77310ms step_avg:63.32ms +[2025-09-05 18:43:33] [Rank 0] step:1221/10000 train_time:77310ms step_avg:63.32ms +[2025-09-05 18:43:34] [Rank 0] step:1241/10000 train_time:78041ms step_avg:62.89ms +[2025-09-05 18:43:34] [Rank 0] step:1241/10000 train_time:78041ms step_avg:62.89ms +[2025-09-05 18:43:35] [Rank 0] step:1261/10000 train_time:78771ms step_avg:62.47ms +[2025-09-05 18:43:35] [Rank 0] step:1261/10000 train_time:78771ms step_avg:62.47ms +[2025-09-05 18:43:36] [Rank 0] step:1281/10000 train_time:79501ms step_avg:62.06ms +[2025-09-05 18:43:36] [Rank 0] step:1281/10000 train_time:79501ms step_avg:62.06ms +[2025-09-05 18:43:36] [Rank 0] step:1301/10000 train_time:80232ms step_avg:61.67ms +[2025-09-05 18:43:36] [Rank 0] step:1301/10000 train_time:80232ms step_avg:61.67ms +[2025-09-05 18:43:37] [Rank 0] step:1321/10000 train_time:80962ms step_avg:61.29ms +[2025-09-05 18:43:37] [Rank 0] step:1321/10000 train_time:80962ms step_avg:61.29ms +[2025-09-05 18:43:38] [Rank 0] step:1341/10000 train_time:81693ms step_avg:60.92ms +[2025-09-05 18:43:38] [Rank 0] step:1341/10000 train_time:81693ms step_avg:60.92ms +[2025-09-05 18:43:39] [Rank 0] step:1361/10000 train_time:82423ms step_avg:60.56ms +[2025-09-05 18:43:39] [Rank 0] step:1361/10000 train_time:82423ms step_avg:60.56ms +[2025-09-05 18:43:39] [Rank 0] step:1381/10000 train_time:83154ms step_avg:60.21ms +[2025-09-05 18:43:39] [Rank 0] step:1381/10000 train_time:83154ms step_avg:60.21ms +[2025-09-05 18:43:40] [Rank 0] step:1401/10000 train_time:83884ms step_avg:59.87ms +[2025-09-05 18:43:40] [Rank 0] step:1401/10000 train_time:83884ms step_avg:59.87ms +[2025-09-05 18:43:41] [Rank 0] step:1421/10000 train_time:84615ms step_avg:59.55ms +[2025-09-05 18:43:41] [Rank 0] step:1421/10000 train_time:84615ms step_avg:59.55ms +[2025-09-05 18:43:42] [Rank 0] step:1441/10000 train_time:85346ms step_avg:59.23ms +[2025-09-05 18:43:42] [Rank 0] step:1441/10000 train_time:85346ms step_avg:59.23ms +[2025-09-05 18:43:42] [Rank 0] step:1461/10000 train_time:86076ms step_avg:58.92ms 
+[2025-09-05 18:43:42] [Rank 0] step:1461/10000 train_time:86076ms step_avg:58.92ms +[2025-09-05 18:43:43] [Rank 0] step:1481/10000 train_time:86949ms step_avg:58.71ms +[2025-09-05 18:43:43] [Rank 0] step:1481/10000 train_time:86949ms step_avg:58.71ms +[2025-09-05 18:43:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:43:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:43:44] [Rank 0] PRINT: step:1500/10000 train_loss:2.4308 val_loss:2.2857 train_time:87759ms step_avg:58.51ms +[2025-09-05 18:43:44] [Rank 0] PRINT: step:1500/10000 train_loss:2.4308 val_loss:2.2857 train_time:87759ms step_avg:58.51ms +[2025-09-05 18:43:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:43:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:43:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:43:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:45:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:45:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:45:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:45:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:45:05] [Rank 0] Total Loss: 4.5405 +[2025-09-05 18:45:05] [Rank 0] Total Loss: 4.5405 +[2025-09-05 18:45:05] [Rank 0] Total FTA (Unweighted): 0.2662 +[2025-09-05 18:45:05] [Rank 0] Total FTA (Unweighted): 0.2662 +[2025-09-05 18:45:05] [Rank 0] Total FTA (Weighted): 0.2662 +[2025-09-05 18:45:05] [Rank 0] Total FTA (Weighted): 0.2662 +[2025-09-05 18:45:05] [Rank 0] Group 0 Loss: 3.1712 +[2025-09-05 18:45:05] [Rank 0] Group 0 Loss: 3.1712 +[2025-09-05 18:45:05] [Rank 0] Group 1 Loss: 2.9825 +[2025-09-05 18:45:05] [Rank 0] Group 1 Loss: 2.9825 +[2025-09-05 18:45:05] [Rank 0] Group 2 Loss: 3.0064 +[2025-09-05 18:45:05] [Rank 0] Group 2 Loss: 3.0064 +[2025-09-05 18:45:05] [Rank 0] Group 3 Loss: 3.4470 +[2025-09-05 18:45:05] [Rank 0] Group 3 Loss: 3.4470 +[2025-09-05 18:45:05] [Rank 0] Group 4 Loss: 3.7968 +[2025-09-05 18:45:05] [Rank 0] Group 4 Loss: 3.7968 +[2025-09-05 18:45:05] [Rank 0] Group 5 Loss: 4.3113 +[2025-09-05 18:45:05] [Rank 0] Group 5 Loss: 4.3113 +[2025-09-05 18:45:05] [Rank 0] Group 6 Loss: 4.6419 +[2025-09-05 18:45:05] [Rank 0] Group 6 Loss: 4.6419 +[2025-09-05 18:45:05] [Rank 0] Group 7 Loss: 4.8100 +[2025-09-05 18:45:05] [Rank 0] Group 7 Loss: 4.8100 +[2025-09-05 18:45:05] [Rank 0] Group 8 Loss: 5.1615 +[2025-09-05 18:45:05] [Rank 0] Group 8 Loss: 5.1615 +[2025-09-05 18:45:05] [Rank 0] Group 9 Loss: 5.2777 +[2025-09-05 18:45:05] [Rank 0] Group 9 Loss: 5.2777 +[2025-09-05 18:45:05] [Rank 0] Group 10 Loss: 5.3601 +[2025-09-05 18:45:05] [Rank 0] Group 10 Loss: 5.3601 +[2025-09-05 18:45:05] [Rank 0] Group 11 Loss: 5.3996 +[2025-09-05 18:45:05] [Rank 0] Group 11 Loss: 5.3996 +[2025-09-05 18:45:05] [Rank 0] Group 12 Loss: 5.2774 +[2025-09-05 18:45:05] [Rank 0] Group 12 Loss: 5.2774 +[2025-09-05 18:45:05] [Rank 0] Group 13 Loss: 5.3260 +[2025-09-05 18:45:05] [Rank 0] Group 13 Loss: 5.3260 +[2025-09-05 18:45:05] [Rank 0] Group 14 Loss: 5.3626 +[2025-09-05 18:45:05] [Rank 0] Group 14 Loss: 5.3626 +[2025-09-05 18:45:05] [Rank 0] Group 15 Loss: 5.3157 +[2025-09-05 18:45:05] [Rank 0] Group 15 Loss: 5.3157 +[2025-09-05 18:45:05] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:45:05] 
[Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:45:05] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:45:05] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:45:05] [Rank 0] Group 2 FTA: 0.4100 +[2025-09-05 18:45:05] [Rank 0] Group 2 FTA: 0.4100 +[2025-09-05 18:45:05] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 18:45:05] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 18:45:05] [Rank 0] Group 4 FTA: 0.1700 +[2025-09-05 18:45:05] [Rank 0] Group 4 FTA: 0.1700 +[2025-09-05 18:45:05] [Rank 0] Group 5 FTA: 0.2100 +[2025-09-05 18:45:05] [Rank 0] Group 5 FTA: 0.2100 +[2025-09-05 18:45:05] [Rank 0] Group 6 FTA: 0.1900 +[2025-09-05 18:45:05] [Rank 0] Group 6 FTA: 0.1900 +[2025-09-05 18:45:05] [Rank 0] Group 7 FTA: 0.1100 +[2025-09-05 18:45:05] [Rank 0] Group 7 FTA: 0.1100 +[2025-09-05 18:45:05] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 18:45:05] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 18:45:05] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 18:45:05] [Rank 0] Group 9 FTA: 0.1300 +[2025-09-05 18:45:05] [Rank 0] Group 10 FTA: 0.1400 +[2025-09-05 18:45:05] [Rank 0] Group 10 FTA: 0.1400 +[2025-09-05 18:45:05] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 18:45:05] [Rank 0] Group 11 FTA: 0.1100 +[2025-09-05 18:45:05] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 18:45:05] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 18:45:05] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 18:45:05] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 18:45:05] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 18:45:05] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 18:45:05] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 18:45:05] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 18:45:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:45:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:45:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:45:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:45:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:45:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:45:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:45:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:45:07] [Rank 0] step:1501/10000 train_time:87769ms step_avg:58.47ms +[2025-09-05 18:45:07] [Rank 0] step:1501/10000 train_time:87769ms step_avg:58.47ms +[2025-09-05 18:45:07] [Rank 0] step:1521/10000 train_time:88432ms step_avg:58.14ms +[2025-09-05 18:45:07] [Rank 0] step:1521/10000 train_time:88432ms step_avg:58.14ms +[2025-09-05 18:45:08] [Rank 0] step:1541/10000 train_time:89163ms step_avg:57.86ms +[2025-09-05 18:45:08] [Rank 0] step:1541/10000 train_time:89163ms step_avg:57.86ms +[2025-09-05 18:45:09] [Rank 0] step:1561/10000 train_time:89893ms 
step_avg:57.59ms +[2025-09-05 18:45:09] [Rank 0] step:1561/10000 train_time:89893ms step_avg:57.59ms +[2025-09-05 18:45:09] [Rank 0] step:1581/10000 train_time:90623ms step_avg:57.32ms +[2025-09-05 18:45:09] [Rank 0] step:1581/10000 train_time:90623ms step_avg:57.32ms +[2025-09-05 18:45:10] [Rank 0] step:1601/10000 train_time:91354ms step_avg:57.06ms +[2025-09-05 18:45:10] [Rank 0] step:1601/10000 train_time:91354ms step_avg:57.06ms +[2025-09-05 18:45:11] [Rank 0] step:1621/10000 train_time:92084ms step_avg:56.81ms +[2025-09-05 18:45:11] [Rank 0] step:1621/10000 train_time:92084ms step_avg:56.81ms +[2025-09-05 18:45:12] [Rank 0] step:1641/10000 train_time:93433ms step_avg:56.94ms +[2025-09-05 18:45:12] [Rank 0] step:1641/10000 train_time:93433ms step_avg:56.94ms +[2025-09-05 18:45:13] [Rank 0] step:1661/10000 train_time:94164ms step_avg:56.69ms +[2025-09-05 18:45:13] [Rank 0] step:1661/10000 train_time:94164ms step_avg:56.69ms +[2025-09-05 18:45:14] [Rank 0] step:1681/10000 train_time:94894ms step_avg:56.45ms +[2025-09-05 18:45:14] [Rank 0] step:1681/10000 train_time:94894ms step_avg:56.45ms +[2025-09-05 18:45:15] [Rank 0] step:1701/10000 train_time:95625ms step_avg:56.22ms +[2025-09-05 18:45:15] [Rank 0] step:1701/10000 train_time:95625ms step_avg:56.22ms +[2025-09-05 18:45:15] [Rank 0] step:1721/10000 train_time:96355ms step_avg:55.99ms +[2025-09-05 18:45:15] [Rank 0] step:1721/10000 train_time:96355ms step_avg:55.99ms +[2025-09-05 18:45:16] [Rank 0] step:1741/10000 train_time:97085ms step_avg:55.76ms +[2025-09-05 18:45:16] [Rank 0] step:1741/10000 train_time:97085ms step_avg:55.76ms +[2025-09-05 18:45:17] [Rank 0] step:1761/10000 train_time:97815ms step_avg:55.55ms +[2025-09-05 18:45:17] [Rank 0] step:1761/10000 train_time:97815ms step_avg:55.55ms +[2025-09-05 18:45:17] [Rank 0] step:1781/10000 train_time:98546ms step_avg:55.33ms +[2025-09-05 18:45:17] [Rank 0] step:1781/10000 train_time:98546ms step_avg:55.33ms +[2025-09-05 18:45:18] [Rank 0] step:1801/10000 train_time:99277ms step_avg:55.12ms +[2025-09-05 18:45:18] [Rank 0] step:1801/10000 train_time:99277ms step_avg:55.12ms +[2025-09-05 18:45:19] [Rank 0] step:1821/10000 train_time:100007ms step_avg:54.92ms +[2025-09-05 18:45:19] [Rank 0] step:1821/10000 train_time:100007ms step_avg:54.92ms +[2025-09-05 18:45:20] [Rank 0] step:1841/10000 train_time:100737ms step_avg:54.72ms +[2025-09-05 18:45:20] [Rank 0] step:1841/10000 train_time:100737ms step_avg:54.72ms +[2025-09-05 18:45:20] [Rank 0] step:1861/10000 train_time:101467ms step_avg:54.52ms +[2025-09-05 18:45:20] [Rank 0] step:1861/10000 train_time:101467ms step_avg:54.52ms +[2025-09-05 18:45:21] [Rank 0] step:1881/10000 train_time:102198ms step_avg:54.33ms +[2025-09-05 18:45:21] [Rank 0] step:1881/10000 train_time:102198ms step_avg:54.33ms +[2025-09-05 18:45:22] [Rank 0] step:1901/10000 train_time:102929ms step_avg:54.14ms +[2025-09-05 18:45:22] [Rank 0] step:1901/10000 train_time:102929ms step_avg:54.14ms +[2025-09-05 18:45:23] [Rank 0] step:1921/10000 train_time:103659ms step_avg:53.96ms +[2025-09-05 18:45:23] [Rank 0] step:1921/10000 train_time:103659ms step_avg:53.96ms +[2025-09-05 18:45:23] [Rank 0] step:1941/10000 train_time:104390ms step_avg:53.78ms +[2025-09-05 18:45:23] [Rank 0] step:1941/10000 train_time:104390ms step_avg:53.78ms +[2025-09-05 18:45:24] [Rank 0] step:1961/10000 train_time:105120ms step_avg:53.61ms +[2025-09-05 18:45:24] [Rank 0] step:1961/10000 train_time:105120ms step_avg:53.61ms +[2025-09-05 18:45:25] [Rank 0] step:1981/10000 train_time:105851ms 
step_avg:53.43ms +[2025-09-05 18:45:25] [Rank 0] step:1981/10000 train_time:105851ms step_avg:53.43ms +[2025-09-05 18:45:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:45:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:45:26] [Rank 0] PRINT: step:2000/10000 train_loss:2.2022 val_loss:2.1060 train_time:106661ms step_avg:53.33ms +[2025-09-05 18:45:26] [Rank 0] PRINT: step:2000/10000 train_loss:2.2022 val_loss:2.1060 train_time:106661ms step_avg:53.33ms +[2025-09-05 18:45:26] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:45:26] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:45:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:45:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:46:47] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:46:47] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:46:47] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:46:47] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:46:47] [Rank 0] Total Loss: 4.4782 +[2025-09-05 18:46:47] [Rank 0] Total Loss: 4.4782 +[2025-09-05 18:46:47] [Rank 0] Total FTA (Unweighted): 0.2963 +[2025-09-05 18:46:47] [Rank 0] Total FTA (Unweighted): 0.2963 +[2025-09-05 18:46:47] [Rank 0] Total FTA (Weighted): 0.2963 +[2025-09-05 18:46:47] [Rank 0] Total FTA (Weighted): 0.2963 +[2025-09-05 18:46:47] [Rank 0] Group 0 Loss: 3.1849 +[2025-09-05 18:46:47] [Rank 0] Group 0 Loss: 3.1849 +[2025-09-05 18:46:47] [Rank 0] Group 1 Loss: 3.1038 +[2025-09-05 18:46:47] [Rank 0] Group 1 Loss: 3.1038 +[2025-09-05 18:46:47] [Rank 0] Group 2 Loss: 3.0571 +[2025-09-05 18:46:47] [Rank 0] Group 2 Loss: 3.0571 +[2025-09-05 18:46:47] [Rank 0] Group 3 Loss: 3.5034 +[2025-09-05 18:46:47] [Rank 0] Group 3 Loss: 3.5034 +[2025-09-05 18:46:47] [Rank 0] Group 4 Loss: 3.7289 +[2025-09-05 18:46:47] [Rank 0] Group 4 Loss: 3.7289 +[2025-09-05 18:46:47] [Rank 0] Group 5 Loss: 4.1937 +[2025-09-05 18:46:47] [Rank 0] Group 5 Loss: 4.1937 +[2025-09-05 18:46:47] [Rank 0] Group 6 Loss: 4.5701 +[2025-09-05 18:46:47] [Rank 0] Group 6 Loss: 4.5701 +[2025-09-05 18:46:47] [Rank 0] Group 7 Loss: 4.6984 +[2025-09-05 18:46:47] [Rank 0] Group 7 Loss: 4.6984 +[2025-09-05 18:46:47] [Rank 0] Group 8 Loss: 5.0334 +[2025-09-05 18:46:47] [Rank 0] Group 8 Loss: 5.0334 +[2025-09-05 18:46:47] [Rank 0] Group 9 Loss: 5.1656 +[2025-09-05 18:46:47] [Rank 0] Group 9 Loss: 5.1656 +[2025-09-05 18:46:47] [Rank 0] Group 10 Loss: 5.2778 +[2025-09-05 18:46:47] [Rank 0] Group 10 Loss: 5.2778 +[2025-09-05 18:46:47] [Rank 0] Group 11 Loss: 5.2716 +[2025-09-05 18:46:47] [Rank 0] Group 11 Loss: 5.2716 +[2025-09-05 18:46:47] [Rank 0] Group 12 Loss: 5.1655 +[2025-09-05 18:46:47] [Rank 0] Group 12 Loss: 5.1655 +[2025-09-05 18:46:47] [Rank 0] Group 13 Loss: 5.2129 +[2025-09-05 18:46:47] [Rank 0] Group 13 Loss: 5.2129 +[2025-09-05 18:46:47] [Rank 0] Group 14 Loss: 5.2753 +[2025-09-05 18:46:47] [Rank 0] Group 14 Loss: 5.2753 +[2025-09-05 18:46:47] [Rank 0] Group 15 Loss: 5.2094 +[2025-09-05 18:46:47] [Rank 0] Group 15 Loss: 5.2094 +[2025-09-05 18:46:47] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:46:47] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:46:47] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:46:47] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 
18:46:47] [Rank 0] Group 2 FTA: 0.5400 +[2025-09-05 18:46:47] [Rank 0] Group 2 FTA: 0.5400 +[2025-09-05 18:46:47] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 18:46:47] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 18:46:47] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 18:46:47] [Rank 0] Group 4 FTA: 0.2500 +[2025-09-05 18:46:47] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 18:46:47] [Rank 0] Group 5 FTA: 0.2400 +[2025-09-05 18:46:47] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 18:46:47] [Rank 0] Group 6 FTA: 0.2900 +[2025-09-05 18:46:47] [Rank 0] Group 7 FTA: 0.1400 +[2025-09-05 18:46:47] [Rank 0] Group 7 FTA: 0.1400 +[2025-09-05 18:46:47] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 18:46:47] [Rank 0] Group 8 FTA: 0.2300 +[2025-09-05 18:46:47] [Rank 0] Group 9 FTA: 0.1500 +[2025-09-05 18:46:47] [Rank 0] Group 9 FTA: 0.1500 +[2025-09-05 18:46:47] [Rank 0] Group 10 FTA: 0.1800 +[2025-09-05 18:46:47] [Rank 0] Group 10 FTA: 0.1800 +[2025-09-05 18:46:47] [Rank 0] Group 11 FTA: 0.1700 +[2025-09-05 18:46:47] [Rank 0] Group 11 FTA: 0.1700 +[2025-09-05 18:46:47] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 18:46:47] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 18:46:47] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 18:46:47] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 18:46:47] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 18:46:47] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 18:46:47] [Rank 0] Group 15 FTA: 0.0600 +[2025-09-05 18:46:47] [Rank 0] Group 15 FTA: 0.0600 +[2025-09-05 18:46:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:46:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:46:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:46:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:46:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:46:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:46:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:46:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:46:49] [Rank 0] step:2001/10000 train_time:106670ms step_avg:53.31ms +[2025-09-05 18:46:49] [Rank 0] step:2001/10000 train_time:106670ms step_avg:53.31ms +[2025-09-05 18:46:50] [Rank 0] step:2021/10000 train_time:107345ms step_avg:53.11ms +[2025-09-05 18:46:50] [Rank 0] step:2021/10000 train_time:107345ms step_avg:53.11ms +[2025-09-05 18:46:50] [Rank 0] step:2041/10000 train_time:108075ms step_avg:52.95ms +[2025-09-05 18:46:50] [Rank 0] step:2041/10000 train_time:108075ms step_avg:52.95ms +[2025-09-05 18:46:51] [Rank 0] step:2061/10000 train_time:108806ms step_avg:52.79ms +[2025-09-05 18:46:51] [Rank 0] step:2061/10000 train_time:108806ms step_avg:52.79ms +[2025-09-05 18:46:52] [Rank 0] step:2081/10000 
train_time:109536ms step_avg:52.64ms +[2025-09-05 18:46:52] [Rank 0] step:2081/10000 train_time:109536ms step_avg:52.64ms +[2025-09-05 18:46:53] [Rank 0] step:2101/10000 train_time:110386ms step_avg:52.54ms +[2025-09-05 18:46:53] [Rank 0] step:2101/10000 train_time:110386ms step_avg:52.54ms +[2025-09-05 18:46:53] [Rank 0] step:2121/10000 train_time:111116ms step_avg:52.39ms +[2025-09-05 18:46:53] [Rank 0] step:2121/10000 train_time:111116ms step_avg:52.39ms +[2025-09-05 18:46:54] [Rank 0] step:2141/10000 train_time:111847ms step_avg:52.24ms +[2025-09-05 18:46:54] [Rank 0] step:2141/10000 train_time:111847ms step_avg:52.24ms +[2025-09-05 18:46:55] [Rank 0] step:2161/10000 train_time:112578ms step_avg:52.10ms +[2025-09-05 18:46:55] [Rank 0] step:2161/10000 train_time:112578ms step_avg:52.10ms +[2025-09-05 18:46:56] [Rank 0] step:2181/10000 train_time:113473ms step_avg:52.03ms +[2025-09-05 18:46:56] [Rank 0] step:2181/10000 train_time:113473ms step_avg:52.03ms +[2025-09-05 18:46:56] [Rank 0] step:2201/10000 train_time:114204ms step_avg:51.89ms +[2025-09-05 18:46:56] [Rank 0] step:2201/10000 train_time:114204ms step_avg:51.89ms +[2025-09-05 18:46:57] [Rank 0] step:2221/10000 train_time:114934ms step_avg:51.75ms +[2025-09-05 18:46:57] [Rank 0] step:2221/10000 train_time:114934ms step_avg:51.75ms +[2025-09-05 18:46:58] [Rank 0] step:2241/10000 train_time:115670ms step_avg:51.62ms +[2025-09-05 18:46:58] [Rank 0] step:2241/10000 train_time:115670ms step_avg:51.62ms +[2025-09-05 18:46:59] [Rank 0] step:2261/10000 train_time:116407ms step_avg:51.48ms +[2025-09-05 18:46:59] [Rank 0] step:2261/10000 train_time:116407ms step_avg:51.48ms +[2025-09-05 18:46:59] [Rank 0] step:2281/10000 train_time:117143ms step_avg:51.36ms +[2025-09-05 18:46:59] [Rank 0] step:2281/10000 train_time:117143ms step_avg:51.36ms +[2025-09-05 18:47:00] [Rank 0] step:2301/10000 train_time:117880ms step_avg:51.23ms +[2025-09-05 18:47:00] [Rank 0] step:2301/10000 train_time:117880ms step_avg:51.23ms +[2025-09-05 18:47:01] [Rank 0] step:2321/10000 train_time:118616ms step_avg:51.11ms +[2025-09-05 18:47:01] [Rank 0] step:2321/10000 train_time:118616ms step_avg:51.11ms +[2025-09-05 18:47:02] [Rank 0] step:2341/10000 train_time:119353ms step_avg:50.98ms +[2025-09-05 18:47:02] [Rank 0] step:2341/10000 train_time:119353ms step_avg:50.98ms +[2025-09-05 18:47:02] [Rank 0] step:2361/10000 train_time:120090ms step_avg:50.86ms +[2025-09-05 18:47:02] [Rank 0] step:2361/10000 train_time:120090ms step_avg:50.86ms +[2025-09-05 18:47:03] [Rank 0] step:2381/10000 train_time:120827ms step_avg:50.75ms +[2025-09-05 18:47:03] [Rank 0] step:2381/10000 train_time:120827ms step_avg:50.75ms +[2025-09-05 18:47:04] [Rank 0] step:2401/10000 train_time:121564ms step_avg:50.63ms +[2025-09-05 18:47:04] [Rank 0] step:2401/10000 train_time:121564ms step_avg:50.63ms +[2025-09-05 18:47:04] [Rank 0] step:2421/10000 train_time:122300ms step_avg:50.52ms +[2025-09-05 18:47:04] [Rank 0] step:2421/10000 train_time:122300ms step_avg:50.52ms +[2025-09-05 18:47:05] [Rank 0] step:2441/10000 train_time:123037ms step_avg:50.40ms +[2025-09-05 18:47:05] [Rank 0] step:2441/10000 train_time:123037ms step_avg:50.40ms +[2025-09-05 18:47:06] [Rank 0] step:2461/10000 train_time:123774ms step_avg:50.29ms +[2025-09-05 18:47:06] [Rank 0] step:2461/10000 train_time:123774ms step_avg:50.29ms +[2025-09-05 18:47:07] [Rank 0] step:2481/10000 train_time:124509ms step_avg:50.19ms +[2025-09-05 18:47:07] [Rank 0] step:2481/10000 train_time:124509ms step_avg:50.19ms +[2025-09-05 18:47:07] [Rank 0] 
PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:47:07] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:47:08] [Rank 0] PRINT: step:2500/10000 train_loss:2.0513 val_loss:1.9776 train_time:125327ms step_avg:50.13ms +[2025-09-05 18:47:08] [Rank 0] PRINT: step:2500/10000 train_loss:2.0513 val_loss:1.9776 train_time:125327ms step_avg:50.13ms +[2025-09-05 18:47:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:47:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:47:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:47:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:48:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:48:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:48:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:48:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:48:29] [Rank 0] Total Loss: 4.4099 +[2025-09-05 18:48:29] [Rank 0] Total Loss: 4.4099 +[2025-09-05 18:48:29] [Rank 0] Total FTA (Unweighted): 0.3150 +[2025-09-05 18:48:29] [Rank 0] Total FTA (Unweighted): 0.3150 +[2025-09-05 18:48:29] [Rank 0] Total FTA (Weighted): 0.3150 +[2025-09-05 18:48:29] [Rank 0] Total FTA (Weighted): 0.3150 +[2025-09-05 18:48:29] [Rank 0] Group 0 Loss: 3.1723 +[2025-09-05 18:48:29] [Rank 0] Group 0 Loss: 3.1723 +[2025-09-05 18:48:29] [Rank 0] Group 1 Loss: 3.0772 +[2025-09-05 18:48:29] [Rank 0] Group 1 Loss: 3.0772 +[2025-09-05 18:48:29] [Rank 0] Group 2 Loss: 3.0616 +[2025-09-05 18:48:29] [Rank 0] Group 2 Loss: 3.0616 +[2025-09-05 18:48:29] [Rank 0] Group 3 Loss: 3.4928 +[2025-09-05 18:48:29] [Rank 0] Group 3 Loss: 3.4928 +[2025-09-05 18:48:29] [Rank 0] Group 4 Loss: 3.6907 +[2025-09-05 18:48:29] [Rank 0] Group 4 Loss: 3.6907 +[2025-09-05 18:48:29] [Rank 0] Group 5 Loss: 4.0983 +[2025-09-05 18:48:29] [Rank 0] Group 5 Loss: 4.0983 +[2025-09-05 18:48:29] [Rank 0] Group 6 Loss: 4.4514 +[2025-09-05 18:48:29] [Rank 0] Group 6 Loss: 4.4514 +[2025-09-05 18:48:29] [Rank 0] Group 7 Loss: 4.6237 +[2025-09-05 18:48:29] [Rank 0] Group 7 Loss: 4.6237 +[2025-09-05 18:48:29] [Rank 0] Group 8 Loss: 4.9207 +[2025-09-05 18:48:29] [Rank 0] Group 8 Loss: 4.9207 +[2025-09-05 18:48:29] [Rank 0] Group 9 Loss: 5.0539 +[2025-09-05 18:48:29] [Rank 0] Group 9 Loss: 5.0539 +[2025-09-05 18:48:29] [Rank 0] Group 10 Loss: 5.1858 +[2025-09-05 18:48:29] [Rank 0] Group 10 Loss: 5.1858 +[2025-09-05 18:48:29] [Rank 0] Group 11 Loss: 5.1834 +[2025-09-05 18:48:29] [Rank 0] Group 11 Loss: 5.1834 +[2025-09-05 18:48:29] [Rank 0] Group 12 Loss: 5.0836 +[2025-09-05 18:48:29] [Rank 0] Group 12 Loss: 5.0836 +[2025-09-05 18:48:29] [Rank 0] Group 13 Loss: 5.1364 +[2025-09-05 18:48:29] [Rank 0] Group 13 Loss: 5.1364 +[2025-09-05 18:48:29] [Rank 0] Group 14 Loss: 5.1819 +[2025-09-05 18:48:29] [Rank 0] Group 14 Loss: 5.1819 +[2025-09-05 18:48:29] [Rank 0] Group 15 Loss: 5.1439 +[2025-09-05 18:48:29] [Rank 0] Group 15 Loss: 5.1439 +[2025-09-05 18:48:29] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:48:29] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:48:29] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:48:29] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:48:29] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 18:48:29] [Rank 0] Group 2 FTA: 0.8000 +[2025-09-05 18:48:29] [Rank 0] Group 3 FTA: 
+[2025-09-05 18:48:29] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 18:48:29] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 18:48:29] [Rank 0] Group 6 FTA: 0.2800
+[2025-09-05 18:48:29] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 18:48:29] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 18:48:29] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 18:48:29] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 18:48:29] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-05 18:48:29] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 18:48:29] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 18:48:29] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 18:48:29] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 18:48:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 18:48:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 18:48:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 18:48:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 18:48:31] [Rank 0] step:2501/10000 train_time:125337ms step_avg:50.11ms
+[2025-09-05 18:48:31] [Rank 0] step:2521/10000 train_time:126009ms step_avg:49.98ms
+[2025-09-05 18:48:32] [Rank 0] step:2541/10000 train_time:126745ms step_avg:49.88ms
+[2025-09-05 18:48:33] [Rank 0] step:2561/10000 train_time:127482ms step_avg:49.78ms
+[2025-09-05 18:48:34] [Rank 0] step:2581/10000 train_time:128218ms step_avg:49.68ms
+[2025-09-05 18:48:34] [Rank 0] step:2601/10000 train_time:128956ms step_avg:49.58ms
+[2025-09-05 18:48:35] [Rank 0] step:2621/10000 train_time:129692ms step_avg:49.48ms
+[2025-09-05 18:48:36] [Rank 0] step:2641/10000 train_time:130429ms step_avg:49.39ms
+[2025-09-05 18:48:37] [Rank 0] step:2661/10000 train_time:131166ms step_avg:49.29ms
+[2025-09-05 18:48:37] [Rank 0] step:2681/10000 train_time:131903ms step_avg:49.20ms
+[2025-09-05 18:48:38] [Rank 0] step:2701/10000 train_time:132639ms step_avg:49.11ms
+[2025-09-05 18:48:39] [Rank 0] step:2721/10000 train_time:133376ms step_avg:49.02ms
+[2025-09-05 18:48:40] [Rank 0] step:2741/10000 train_time:134113ms step_avg:48.93ms
+[2025-09-05 18:48:40] [Rank 0] step:2761/10000 train_time:134850ms step_avg:48.84ms
+[2025-09-05 18:48:41] [Rank 0] step:2781/10000 train_time:135586ms step_avg:48.75ms
+[2025-09-05 18:48:42] [Rank 0] step:2801/10000 train_time:136322ms step_avg:48.67ms
+[2025-09-05 18:48:43] [Rank 0] step:2821/10000 train_time:137686ms step_avg:48.81ms
+[2025-09-05 18:48:44] [Rank 0] step:2841/10000 train_time:138423ms step_avg:48.72ms
+[2025-09-05 18:48:45] [Rank 0] step:2861/10000 train_time:139159ms step_avg:48.64ms
+[2025-09-05 18:48:45] [Rank 0] step:2881/10000 train_time:139896ms step_avg:48.56ms
+[2025-09-05 18:48:46] [Rank 0] step:2901/10000 train_time:140633ms step_avg:48.48ms
+[2025-09-05 18:48:47] [Rank 0] step:2921/10000 train_time:141369ms step_avg:48.40ms
+[2025-09-05 18:48:48] [Rank 0] step:2941/10000 train_time:142105ms step_avg:48.32ms
+[2025-09-05 18:48:48] [Rank 0] step:2961/10000 train_time:142841ms step_avg:48.24ms
+[2025-09-05 18:48:49] [Rank 0] step:2981/10000 train_time:143578ms step_avg:48.16ms
+[2025-09-05 18:48:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:48:50] [Rank 0] PRINT: step:3000/10000 train_loss:1.9431 val_loss:1.8932 train_time:144396ms step_avg:48.13ms
+[2025-09-05 18:48:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:48:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:50:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:50:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:50:12] [Rank 0] Total Loss: 4.3448
+[2025-09-05 18:50:12] [Rank 0] Total FTA (Unweighted): 0.3325
+[2025-09-05 18:50:12] [Rank 0] Total FTA (Weighted): 0.3325
+[2025-09-05 18:50:12] [Rank 0] Group 0 Loss: 3.1712
+[2025-09-05 18:50:12] [Rank 0] Group 1 Loss: 3.0823
+[2025-09-05 18:50:12] [Rank 0] Group 2 Loss: 3.0666
+[2025-09-05 18:50:12] [Rank 0] Group 3 Loss: 3.4646
+[2025-09-05 18:50:12] [Rank 0] Group 4 Loss: 3.6541
+[2025-09-05 18:50:12] [Rank 0] Group 5 Loss: 4.0362
+[2025-09-05 18:50:12] [Rank 0] Group 6 Loss: 4.3317
+[2025-09-05 18:50:12] [Rank 0] Group 7 Loss: 4.5214
+[2025-09-05 18:50:12] [Rank 0] Group 8 Loss: 4.8353
+[2025-09-05 18:50:12] [Rank 0] Group 9 Loss: 4.9689
+[2025-09-05 18:50:12] [Rank 0] Group 10 Loss: 5.1049
+[2025-09-05 18:50:12] [Rank 0] Group 11 Loss: 5.0929
+[2025-09-05 18:50:12] [Rank 0] Group 12 Loss: 4.9974
+[2025-09-05 18:50:12] [Rank 0] Group 13 Loss: 5.0357
+[2025-09-05 18:50:12] [Rank 0] Group 14 Loss: 5.0953
+[2025-09-05 18:50:12] [Rank 0] Group 15 Loss: 5.0585
+[2025-09-05 18:50:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:50:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:50:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:50:12] [Rank 0] Group 3 FTA: 0.2000
+[2025-09-05 18:50:12] [Rank 0] Group 4 FTA: 0.2800
+[2025-09-05 18:50:12] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 18:50:12] [Rank 0] Group 6 FTA: 0.3100
+[2025-09-05 18:50:12] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 18:50:12] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 18:50:12] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 18:50:12] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 18:50:12] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 18:50:12] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 18:50:12] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 18:50:12] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 18:50:12] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 18:50:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 18:50:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 18:50:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 18:50:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 18:50:13] [Rank 0] step:3001/10000 train_time:144405ms step_avg:48.12ms
+[2025-09-05 18:50:14] [Rank 0] step:3021/10000 train_time:145079ms step_avg:48.02ms
+[2025-09-05 18:50:15] [Rank 0] step:3041/10000 train_time:145816ms step_avg:47.95ms
+[2025-09-05 18:50:15] [Rank 0] step:3061/10000 train_time:146553ms step_avg:47.88ms
+[2025-09-05 18:50:16] [Rank 0] step:3081/10000 train_time:147290ms step_avg:47.81ms
+[2025-09-05 18:50:17] [Rank 0] step:3101/10000 train_time:148026ms step_avg:47.73ms
+[2025-09-05 18:50:17] [Rank 0] step:3121/10000 train_time:148762ms step_avg:47.66ms
+[2025-09-05 18:50:18] [Rank 0] step:3141/10000 train_time:149499ms step_avg:47.60ms
+[2025-09-05 18:50:19] [Rank 0] step:3161/10000 train_time:150236ms step_avg:47.53ms
+[2025-09-05 18:50:20] [Rank 0] step:3181/10000 train_time:150973ms step_avg:47.46ms
+[2025-09-05 18:50:20] [Rank 0] step:3201/10000 train_time:151710ms step_avg:47.39ms
+[2025-09-05 18:50:21] [Rank 0] step:3221/10000 train_time:152446ms step_avg:47.33ms
+[2025-09-05 18:50:22] [Rank 0] step:3241/10000 train_time:153183ms step_avg:47.26ms
+[2025-09-05 18:50:23] [Rank 0] step:3261/10000 train_time:153919ms step_avg:47.20ms
+[2025-09-05 18:50:23] [Rank 0] step:3281/10000 train_time:154655ms step_avg:47.14ms
+[2025-09-05 18:50:24] [Rank 0] step:3301/10000 train_time:155392ms step_avg:47.07ms
+[2025-09-05 18:50:25] [Rank 0] step:3321/10000 train_time:156128ms step_avg:47.01ms
+[2025-09-05 18:50:26] [Rank 0] step:3341/10000 train_time:156865ms step_avg:46.95ms
+[2025-09-05 18:50:26] [Rank 0] step:3361/10000 train_time:157602ms step_avg:46.89ms
+[2025-09-05 18:50:27] [Rank 0] step:3381/10000 train_time:158338ms step_avg:46.83ms
+[2025-09-05 18:50:28] [Rank 0] step:3401/10000 train_time:159075ms step_avg:46.77ms
+[2025-09-05 18:50:29] [Rank 0] step:3421/10000 train_time:159811ms step_avg:46.71ms
+[2025-09-05 18:50:29] [Rank 0] step:3441/10000 train_time:160548ms step_avg:46.66ms
+[2025-09-05 18:50:30] [Rank 0] step:3461/10000 train_time:161285ms step_avg:46.60ms
+[2025-09-05 18:50:31] [Rank 0] step:3481/10000 train_time:162021ms step_avg:46.54ms
+[2025-09-05 18:50:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:50:32] [Rank 0] PRINT: step:3500/10000 train_loss:1.8706 val_loss:1.8328 train_time:162839ms step_avg:46.53ms
+[2025-09-05 18:50:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:50:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:51:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:51:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:51:53] [Rank 0] Total Loss: 4.2583
+[2025-09-05 18:51:53] [Rank 0] Total FTA (Unweighted): 0.3706
+[2025-09-05 18:51:53] [Rank 0] Total FTA (Weighted): 0.3706
+[2025-09-05 18:51:53] [Rank 0] Group 0 Loss: 3.1741
+[2025-09-05 18:51:53] [Rank 0] Group 1 Loss: 3.0110
+[2025-09-05 18:51:53] [Rank 0] Group 2 Loss: 3.0070
+[2025-09-05 18:51:53] [Rank 0] Group 3 Loss: 3.3980
+[2025-09-05 18:51:53] [Rank 0] Group 4 Loss: 3.5975
+[2025-09-05 18:51:53] [Rank 0] Group 5 Loss: 3.9119
+[2025-09-05 18:51:53] [Rank 0] Group 6 Loss: 4.2012
+[2025-09-05 18:51:53] [Rank 0] Group 7 Loss: 4.4392
+[2025-09-05 18:51:53] [Rank 0] Group 8 Loss: 4.7525
+[2025-09-05 18:51:53] [Rank 0] Group 9 Loss: 4.8762
+[2025-09-05 18:51:53] [Rank 0] Group 10 Loss: 4.9645
+[2025-09-05 18:51:53] [Rank 0] Group 11 Loss: 4.9618
+[2025-09-05 18:51:53] [Rank 0] Group 12 Loss: 4.9181
+[2025-09-05 18:51:53] [Rank 0] Group 13 Loss: 4.9539
+[2025-09-05 18:51:53] [Rank 0] Group 14 Loss: 5.0011
+[2025-09-05 18:51:53] [Rank 0] Group 15 Loss: 4.9644
+[2025-09-05 18:51:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:51:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:51:53] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:51:53] [Rank 0] Group 3 FTA: 0.3500
+[2025-09-05 18:51:53] [Rank 0] Group 4 FTA: 0.3800
+[2025-09-05 18:51:53] [Rank 0] Group 5 FTA: 0.3800
+[2025-09-05 18:51:53] [Rank 0] Group 6 FTA: 0.3700
+[2025-09-05 18:51:53] [Rank 0] Group 7 FTA: 0.2100
+[2025-09-05 18:51:53] [Rank 0] Group 8 FTA: 0.2700
+[2025-09-05 18:51:53] [Rank 0] Group 9 FTA: 0.2000
+[2025-09-05 18:51:53] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 18:51:53] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 18:51:53] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 18:51:53] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 18:51:53] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 18:51:53] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 18:51:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 18:51:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 18:51:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 18:51:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 18:51:55] [Rank 0] step:3501/10000 train_time:162848ms step_avg:46.51ms
+[2025-09-05 18:51:55] [Rank 0] step:3521/10000 train_time:163526ms step_avg:46.44ms
+[2025-09-05 18:51:56] [Rank 0] step:3541/10000 train_time:164263ms step_avg:46.39ms
+[2025-09-05 18:51:57] [Rank 0] step:3561/10000 train_time:164999ms step_avg:46.34ms
+[2025-09-05 18:51:57] [Rank 0] step:3581/10000 train_time:165736ms step_avg:46.28ms
+[2025-09-05 18:51:58] [Rank 0] step:3601/10000 train_time:166472ms step_avg:46.23ms
+[2025-09-05 18:51:59] [Rank 0] step:3621/10000 train_time:167209ms step_avg:46.18ms
+[2025-09-05 18:52:00] [Rank 0] step:3641/10000 train_time:168564ms step_avg:46.30ms
+[2025-09-05 18:52:01] [Rank 0] step:3661/10000 train_time:169300ms step_avg:46.24ms
+[2025-09-05 18:52:02] [Rank 0] step:3681/10000 train_time:170037ms step_avg:46.19ms
+[2025-09-05 18:52:03] [Rank 0] step:3701/10000 train_time:170775ms step_avg:46.14ms
+[2025-09-05 18:52:03] [Rank 0] step:3721/10000 train_time:171512ms step_avg:46.09ms
+[2025-09-05 18:52:04] [Rank 0] step:3741/10000 train_time:172249ms step_avg:46.04ms
+[2025-09-05 18:52:05] [Rank 0] step:3761/10000 train_time:172987ms step_avg:45.99ms
+[2025-09-05 18:52:05] [Rank 0] step:3781/10000 train_time:173724ms step_avg:45.95ms
+[2025-09-05 18:52:06] [Rank 0] step:3801/10000 train_time:174461ms step_avg:45.90ms
+[2025-09-05 18:52:07] [Rank 0] step:3821/10000 train_time:175198ms step_avg:45.85ms
+[2025-09-05 18:52:08] [Rank 0] step:3841/10000 train_time:175935ms step_avg:45.80ms
+[2025-09-05 18:52:09] [Rank 0] step:3861/10000 train_time:176785ms step_avg:45.79ms
+[2025-09-05 18:52:09] [Rank 0] step:3881/10000 train_time:177534ms step_avg:45.74ms
+[2025-09-05 18:52:10] [Rank 0] step:3901/10000 train_time:178271ms step_avg:45.70ms
+[2025-09-05 18:52:11] [Rank 0] step:3921/10000 train_time:179008ms step_avg:45.65ms
+[2025-09-05 18:52:12] [Rank 0] step:3941/10000 train_time:179886ms step_avg:45.64ms
+[2025-09-05 18:52:12] [Rank 0] step:3961/10000 train_time:180623ms step_avg:45.60ms
+[2025-09-05 18:52:13] [Rank 0] step:3981/10000 train_time:181360ms step_avg:45.56ms
+[2025-09-05 18:52:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:52:14] [Rank 0] PRINT: step:4000/10000 train_loss:1.8184 val_loss:1.7893 train_time:182177ms step_avg:45.54ms
+[2025-09-05 18:52:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:52:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:53:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:53:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:53:36] [Rank 0] Total Loss: 4.2796
+[2025-09-05 18:53:36] [Rank 0] Total FTA (Unweighted): 0.3862
+[2025-09-05 18:53:36] [Rank 0] Total FTA (Weighted): 0.3862
+[2025-09-05 18:53:36] [Rank 0] Group 0 Loss: 3.2057
+[2025-09-05 18:53:36] [Rank 0] Group 1 Loss: 3.0912
+[2025-09-05 18:53:36] [Rank 0] Group 2 Loss: 3.1128
+[2025-09-05 18:53:36] [Rank 0] Group 3 Loss: 3.4802
+[2025-09-05 18:53:36] [Rank 0] Group 4 Loss: 3.5889
+[2025-09-05 18:53:36] [Rank 0] Group 5 Loss: 3.9263
+[2025-09-05 18:53:36] [Rank 0] Group 6 Loss: 4.2257
+[2025-09-05 18:53:36] [Rank 0] Group 7 Loss: 4.4356
+[2025-09-05 18:53:36] [Rank 0] Group 8 Loss: 4.7485
+[2025-09-05 18:53:36] [Rank 0] Group 9 Loss: 4.8761
+[2025-09-05 18:53:36] [Rank 0] Group 10 Loss: 4.9864
+[2025-09-05 18:53:36] [Rank 0] Group 11 Loss: 4.9729
+[2025-09-05 18:53:36] [Rank 0] Group 12 Loss: 4.9041
+[2025-09-05 18:53:36] [Rank 0] Group 13 Loss: 4.9684
+[2025-09-05 18:53:36] [Rank 0] Group 14 Loss: 4.9987
+[2025-09-05 18:53:36] [Rank 0] Group 15 Loss: 4.9520
+[2025-09-05 18:53:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:53:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:53:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:53:36] [Rank 0] Group 3 FTA: 0.4100
+[2025-09-05 18:53:36] [Rank 0] Group 4 FTA: 0.3800
+[2025-09-05 18:53:36] [Rank 0] Group 5 FTA: 0.3500
+[2025-09-05 18:53:36] [Rank 0] Group 6 FTA: 0.3600
+[2025-09-05 18:53:36] [Rank 0] Group 7 FTA: 0.2400
+[2025-09-05 18:53:36] [Rank 0] Group 8 FTA: 0.3000
+[2025-09-05 18:53:36] [Rank 0] Group 9 FTA: 0.2000
+[2025-09-05 18:53:36] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-05 18:53:36] [Rank 0] Group 11 FTA: 0.2300
+[2025-09-05 18:53:36] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 18:53:36] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 18:53:36] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 18:53:36] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 18:53:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 18:53:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 18:53:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 18:53:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 18:53:37] [Rank 0] step:4001/10000 train_time:182186ms step_avg:45.54ms
+[2025-09-05 18:53:39] [Rank 0] step:4021/10000 train_time:183462ms step_avg:45.63ms
+[2025-09-05 18:53:39] [Rank 0] step:4041/10000 train_time:184198ms step_avg:45.58ms
+[2025-09-05 18:53:40] [Rank 0] step:4061/10000 train_time:184935ms step_avg:45.54ms
+[2025-09-05 18:53:41] [Rank 0] step:4081/10000 train_time:185671ms step_avg:45.50ms
+[2025-09-05 18:53:42] [Rank 0] step:4101/10000 train_time:186408ms step_avg:45.45ms
+[2025-09-05 18:53:42] [Rank 0] step:4121/10000 train_time:187145ms step_avg:45.41ms
+[2025-09-05 18:53:43] [Rank 0] step:4141/10000 train_time:187881ms step_avg:45.37ms
+[2025-09-05 18:53:44] [Rank 0] step:4161/10000 train_time:188619ms step_avg:45.33ms
+[2025-09-05 18:53:45] [Rank 0] step:4181/10000 train_time:189355ms step_avg:45.29ms
+[2025-09-05 18:53:45] [Rank 0] step:4201/10000 train_time:190092ms step_avg:45.25ms
+[2025-09-05 18:53:46] [Rank 0] step:4221/10000 train_time:190828ms step_avg:45.21ms
+[2025-09-05 18:53:47] [Rank 0] step:4241/10000 train_time:191565ms step_avg:45.17ms
+[2025-09-05 18:53:47] [Rank 0] step:4261/10000 train_time:192302ms step_avg:45.13ms
+[2025-09-05 18:53:48] [Rank 0] step:4281/10000 train_time:193039ms step_avg:45.09ms
+[2025-09-05 18:53:49] [Rank 0] step:4301/10000 train_time:193776ms step_avg:45.05ms
+[2025-09-05 18:53:50] [Rank 0] step:4321/10000 train_time:194513ms step_avg:45.02ms
+[2025-09-05 18:53:50] [Rank 0] step:4341/10000 train_time:195250ms step_avg:44.98ms
+[2025-09-05 18:53:51] [Rank 0] step:4361/10000 train_time:195987ms step_avg:44.94ms
+[2025-09-05 18:53:52] [Rank 0] step:4381/10000 train_time:196724ms step_avg:44.90ms
+[2025-09-05 18:53:53] [Rank 0] step:4401/10000 train_time:197460ms step_avg:44.87ms
+[2025-09-05 18:53:53] [Rank 0] step:4421/10000 train_time:198198ms step_avg:44.83ms
+[2025-09-05 18:53:54] [Rank 0] step:4441/10000 train_time:198935ms step_avg:44.80ms
+[2025-09-05 18:53:55] [Rank 0] step:4461/10000 train_time:199672ms step_avg:44.76ms
+[2025-09-05 18:53:56] [Rank 0] step:4481/10000 train_time:200409ms step_avg:44.72ms
+[2025-09-05 18:53:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:53:57] [Rank 0] PRINT: step:4500/10000 train_loss:1.7813 val_loss:1.7564 train_time:201227ms step_avg:44.72ms
+[2025-09-05 18:53:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:53:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:55:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:55:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:55:18] [Rank 0] Total Loss: 4.2869
+[2025-09-05 18:55:18] [Rank 0] Total FTA (Unweighted): 0.3987
+[2025-09-05 18:55:18] [Rank 0] Total FTA (Weighted): 0.3987
+[2025-09-05 18:55:18] [Rank 0] Group 0 Loss: 3.2139
+[2025-09-05 18:55:18] [Rank 0] Group 1 Loss: 3.0747
+[2025-09-05 18:55:18] [Rank 0] Group 2 Loss: 3.0966
+[2025-09-05 18:55:18] [Rank 0] Group 3 Loss: 3.4461
+[2025-09-05 18:55:18] [Rank 0] Group 4 Loss: 3.6722
+[2025-09-05 18:55:18] [Rank 0] Group 5 Loss: 3.9472
+[2025-09-05 18:55:18] [Rank 0] Group 6 Loss: 4.2621
+[2025-09-05 18:55:18] [Rank 0] Group 7 Loss: 4.4324
+[2025-09-05 18:55:18] [Rank 0] Group 8 Loss: 4.7146
+[2025-09-05 18:55:18] [Rank 0] Group 9 Loss: 4.8666
+[2025-09-05 18:55:18] [Rank 0] Group 10 Loss: 5.0011
+[2025-09-05 18:55:18] [Rank 0] Group 11 Loss: 4.9846
+[2025-09-05 18:55:18] [Rank 0] Group 12 Loss: 4.9058
+[2025-09-05 18:55:18] [Rank 0] Group 13 Loss: 4.9746
+[2025-09-05 18:55:18] [Rank 0] Group 14 Loss: 5.0318
+[2025-09-05 18:55:18] [Rank 0] Group 15 Loss: 4.9661
+[2025-09-05 18:55:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:55:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:55:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:55:18] [Rank 0] Group 3 FTA: 0.4900
+[2025-09-05 18:55:18] [Rank 0] Group 4 FTA: 0.3900
+[2025-09-05 18:55:18] [Rank 0] Group 5 FTA: 0.4000
+[2025-09-05 18:55:18] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 18:55:18] [Rank 0] Group 7 FTA: 0.2600
+[2025-09-05 18:55:18] [Rank 0] Group 8 FTA: 0.3000
+[2025-09-05 18:55:18] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 18:55:18] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 18:55:18] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 18:55:18] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 18:55:18] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 18:55:18] [Rank 0] Group 14 FTA: 0.0900
+[2025-09-05 18:55:18] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 18:55:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 18:55:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 18:55:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 18:55:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 18:55:20] [Rank 0] step:4501/10000 train_time:201236ms step_avg:44.71ms
+[2025-09-05 18:55:20] [Rank 0] step:4521/10000 train_time:201905ms step_avg:44.66ms
+[2025-09-05 18:55:21] [Rank 0] step:4541/10000 train_time:202788ms step_avg:44.66ms
+[2025-09-05 18:55:22] [Rank 0] step:4561/10000 train_time:203524ms step_avg:44.62ms
+[2025-09-05 18:55:23] [Rank 0] step:4581/10000 train_time:204261ms step_avg:44.59ms
+[2025-09-05 18:55:23] [Rank 0] step:4601/10000 train_time:204998ms step_avg:44.56ms
+[2025-09-05 18:55:24] [Rank 0] step:4621/10000 train_time:205733ms step_avg:44.52ms
+[2025-09-05 18:55:25] [Rank 0] step:4641/10000 train_time:206470ms step_avg:44.49ms
+[2025-09-05 18:55:26] [Rank 0] step:4661/10000 train_time:207207ms step_avg:44.46ms
+[2025-09-05 18:55:26] [Rank 0] step:4681/10000 train_time:207944ms step_avg:44.42ms
+[2025-09-05 18:55:27] [Rank 0] step:4701/10000 train_time:208679ms step_avg:44.39ms
+[2025-09-05 18:55:28] [Rank 0] step:4721/10000 train_time:209416ms step_avg:44.36ms
+[2025-09-05 18:55:29] [Rank 0] step:4741/10000 train_time:210153ms step_avg:44.33ms
+[2025-09-05 18:55:29] [Rank 0] step:4761/10000 train_time:210889ms step_avg:44.30ms
+[2025-09-05 18:55:30] [Rank 0] step:4781/10000 train_time:211626ms step_avg:44.26ms
+[2025-09-05 18:55:31] [Rank 0] step:4801/10000 train_time:212361ms step_avg:44.23ms
+[2025-09-05 18:55:32] [Rank 0] step:4821/10000 train_time:213098ms step_avg:44.20ms
+[2025-09-05 18:55:33] [Rank 0] step:4841/10000 train_time:214143ms step_avg:44.24ms
+[2025-09-05 18:55:33] [Rank 0] step:4861/10000 train_time:214880ms step_avg:44.20ms
+[2025-09-05 18:55:34] [Rank 0] step:4881/10000 train_time:215617ms step_avg:44.17ms
+[2025-09-05 18:55:35] [Rank 0] step:4901/10000 train_time:216353ms step_avg:44.14ms
+[2025-09-05 18:55:36] [Rank 0] step:4921/10000 train_time:217090ms step_avg:44.11ms
+[2025-09-05 18:55:36] [Rank 0] step:4941/10000 train_time:217826ms step_avg:44.09ms
+[2025-09-05 18:55:37] [Rank 0] step:4961/10000 train_time:218563ms step_avg:44.06ms
+[2025-09-05 18:55:38] [Rank 0] step:4981/10000 train_time:219300ms step_avg:44.03ms
+[2025-09-05 18:55:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:55:39] [Rank 0] PRINT: step:5000/10000 train_loss:1.7510 val_loss:1.7303 train_time:220118ms step_avg:44.02ms
+[2025-09-05 18:55:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 18:55:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 18:57:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 18:57:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 18:57:00] [Rank 0] Total Loss: 4.2048
+[2025-09-05 18:57:00] [Rank 0] Total FTA (Unweighted): 0.4181
+[2025-09-05 18:57:00] [Rank 0] Total FTA (Weighted): 0.4181
+[2025-09-05 18:57:00] [Rank 0] Group 0 Loss: 3.1878
+[2025-09-05 18:57:00] [Rank 0] Group 1 Loss: 3.0771
+[2025-09-05 18:57:00] [Rank 0] Group 2 Loss: 3.0206
+[2025-09-05 18:57:00] [Rank 0] Group 3 Loss: 3.3681
+[2025-09-05 18:57:00] [Rank 0] Group 4 Loss: 3.5957
+[2025-09-05 18:57:00] [Rank 0] Group 5 Loss: 3.8681
+[2025-09-05 18:57:00] [Rank 0] Group 6 Loss: 4.1614
+[2025-09-05 18:57:00] [Rank 0] Group 7 Loss: 4.3423
+[2025-09-05 18:57:00] [Rank 0] Group 8 Loss: 4.6252
+[2025-09-05 18:57:00] [Rank 0] Group 9 Loss: 4.7688
+[2025-09-05 18:57:00] [Rank 0] Group 10 Loss: 4.9049
+[2025-09-05 18:57:00] [Rank 0] Group 11 Loss: 4.8706
+[2025-09-05 18:57:00] [Rank 0] Group 12 Loss: 4.8310
+[2025-09-05 18:57:00] [Rank 0] Group 13 Loss: 4.8694
+[2025-09-05 18:57:00] [Rank 0] Group 14 Loss: 4.9148
+[2025-09-05 18:57:00] [Rank 0] Group 15 Loss: 4.8711
+[2025-09-05 18:57:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 18:57:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 18:57:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 18:57:00] [Rank 0] Group 3 FTA: 0.5300
+[2025-09-05 18:57:00] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 18:57:00] [Rank 0] Group 5 FTA: 0.4400
+[2025-09-05 18:57:00] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 18:57:00] [Rank 0] Group 7 FTA: 0.2900
+[2025-09-05 18:57:00] [Rank 0] Group 8 FTA: 0.3200
+[2025-09-05 18:57:00] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 18:57:00] [Rank 0] Group 10 FTA: 0.2700
+[2025-09-05 18:57:00] [Rank 0] Group 11 FTA: 0.2400
+[2025-09-05 18:57:00] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 18:57:00] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 18:57:00] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 18:57:00] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 18:57:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 18:57:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 18:57:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 18:57:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 18:57:02] [Rank 0] step:5001/10000 train_time:220126ms step_avg:44.02ms
+[2025-09-05 18:57:03] [Rank 0] step:5021/10000 train_time:220791ms step_avg:43.97ms
+[2025-09-05 18:57:03] [Rank 0] step:5041/10000 train_time:221528ms step_avg:43.95ms
+[2025-09-05 18:57:04] [Rank 0] step:5061/10000 train_time:222264ms step_avg:43.92ms
+[2025-09-05 18:57:05] [Rank 0] step:5081/10000 train_time:223000ms step_avg:43.89ms
+[2025-09-05 18:57:05] [Rank 0] step:5101/10000 train_time:223737ms step_avg:43.86ms
+[2025-09-05 18:57:06] [Rank 0] step:5121/10000 train_time:224474ms step_avg:43.83ms
+[2025-09-05 18:57:07] [Rank 0] step:5141/10000 train_time:225210ms step_avg:43.81ms
+[2025-09-05 18:57:08] [Rank 0] step:5161/10000 train_time:225947ms step_avg:43.78ms
+[2025-09-05 18:57:08] [Rank 0] step:5181/10000 train_time:226684ms step_avg:43.75ms
+[2025-09-05 18:57:09] [Rank 0] step:5201/10000 train_time:227421ms step_avg:43.73ms
+[2025-09-05 18:57:10] [Rank 0] step:5221/10000 train_time:228158ms step_avg:43.70ms
+[2025-09-05 18:57:11] [Rank 0] step:5241/10000 train_time:228895ms step_avg:43.67ms
+[2025-09-05 18:57:11] [Rank 0] step:5261/10000 train_time:229632ms step_avg:43.65ms
+[2025-09-05 18:57:12] [Rank 0] step:5281/10000 train_time:230369ms step_avg:43.62ms
+[2025-09-05 18:57:13] [Rank 0] step:5301/10000 train_time:231106ms step_avg:43.60ms
+[2025-09-05 18:57:14] [Rank 0] step:5321/10000 train_time:231843ms step_avg:43.57ms
+[2025-09-05 18:57:14] [Rank 0] step:5341/10000 train_time:232580ms step_avg:43.55ms
+[2025-09-05 18:57:15] [Rank 0] step:5361/10000 train_time:233317ms step_avg:43.52ms
+[2025-09-05 18:57:16] [Rank 0] step:5381/10000 train_time:234053ms step_avg:43.50ms
+[2025-09-05 18:57:17] [Rank 0] step:5401/10000 train_time:234789ms step_avg:43.47ms
+[2025-09-05 18:57:17] [Rank 0] step:5421/10000 train_time:235525ms step_avg:43.45ms
+[2025-09-05 18:57:18] [Rank 0] step:5441/10000 train_time:236263ms step_avg:43.42ms
+[2025-09-05 18:57:19] [Rank 0] step:5461/10000 train_time:237001ms step_avg:43.40ms
+[2025-09-05 18:57:19] [Rank 0] step:5481/10000 train_time:237738ms step_avg:43.37ms
+[2025-09-05 18:57:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 18:57:21] [Rank 0] PRINT: step:5500/10000 train_loss:1.7288 val_loss:1.7101 train_time:238554ms step_avg:43.37ms +[2025-09-05 18:57:21] [Rank 0] PRINT: step:5500/10000 train_loss:1.7288 val_loss:1.7101 train_time:238554ms step_avg:43.37ms +[2025-09-05 18:57:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:57:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:57:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:57:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:58:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:58:41] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 18:58:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:58:41] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 18:58:41] [Rank 0] Total Loss: 4.2322 +[2025-09-05 18:58:41] [Rank 0] Total Loss: 4.2322 +[2025-09-05 18:58:41] [Rank 0] Total FTA (Unweighted): 0.4288 +[2025-09-05 18:58:41] [Rank 0] Total FTA (Unweighted): 0.4288 +[2025-09-05 18:58:41] [Rank 0] Total FTA (Weighted): 0.4288 +[2025-09-05 18:58:41] [Rank 0] Total FTA (Weighted): 0.4288 +[2025-09-05 18:58:41] [Rank 0] Group 0 Loss: 3.1844 +[2025-09-05 18:58:41] [Rank 0] Group 0 Loss: 3.1844 +[2025-09-05 18:58:41] [Rank 0] Group 1 Loss: 3.1350 +[2025-09-05 18:58:41] [Rank 0] Group 1 Loss: 3.1350 +[2025-09-05 18:58:41] [Rank 0] Group 2 Loss: 3.0514 +[2025-09-05 18:58:41] [Rank 0] Group 2 Loss: 3.0514 +[2025-09-05 18:58:41] [Rank 0] Group 3 Loss: 3.3931 +[2025-09-05 18:58:41] [Rank 0] Group 3 Loss: 3.3931 +[2025-09-05 18:58:41] [Rank 0] Group 4 Loss: 3.6184 +[2025-09-05 18:58:41] [Rank 0] Group 4 Loss: 3.6184 +[2025-09-05 18:58:41] [Rank 0] Group 5 Loss: 3.9230 +[2025-09-05 18:58:41] [Rank 0] Group 5 Loss: 3.9230 +[2025-09-05 18:58:41] [Rank 0] Group 6 Loss: 4.2078 +[2025-09-05 18:58:41] [Rank 0] Group 6 Loss: 4.2078 +[2025-09-05 18:58:41] [Rank 0] Group 7 Loss: 4.3767 +[2025-09-05 18:58:41] [Rank 0] Group 7 Loss: 4.3767 +[2025-09-05 18:58:41] [Rank 0] Group 8 Loss: 4.6506 +[2025-09-05 18:58:41] [Rank 0] Group 8 Loss: 4.6506 +[2025-09-05 18:58:41] [Rank 0] Group 9 Loss: 4.7962 +[2025-09-05 18:58:41] [Rank 0] Group 9 Loss: 4.7962 +[2025-09-05 18:58:41] [Rank 0] Group 10 Loss: 4.9447 +[2025-09-05 18:58:41] [Rank 0] Group 10 Loss: 4.9447 +[2025-09-05 18:58:41] [Rank 0] Group 11 Loss: 4.8899 +[2025-09-05 18:58:41] [Rank 0] Group 11 Loss: 4.8899 +[2025-09-05 18:58:41] [Rank 0] Group 12 Loss: 4.8528 +[2025-09-05 18:58:41] [Rank 0] Group 12 Loss: 4.8528 +[2025-09-05 18:58:41] [Rank 0] Group 13 Loss: 4.8867 +[2025-09-05 18:58:41] [Rank 0] Group 13 Loss: 4.8867 +[2025-09-05 18:58:41] [Rank 0] Group 14 Loss: 4.9303 +[2025-09-05 18:58:41] [Rank 0] Group 14 Loss: 4.9303 +[2025-09-05 18:58:42] [Rank 0] Group 15 Loss: 4.8737 +[2025-09-05 18:58:42] [Rank 0] Group 15 Loss: 4.8737 +[2025-09-05 18:58:42] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:58:42] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 18:58:42] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:58:42] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 18:58:42] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:58:42] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 18:58:42] [Rank 0] Group 3 FTA: 0.5800 +[2025-09-05 18:58:42] [Rank 0] Group 3 FTA: 0.5800 +[2025-09-05 18:58:42] [Rank 0] Group 4 FTA: 0.4000 +[2025-09-05 18:58:42] [Rank 0] Group 4 FTA: 0.4000 +[2025-09-05 18:58:42] [Rank 0] Group 5 FTA: 0.4400 +[2025-09-05 18:58:42] [Rank 0] Group 5 FTA: 
0.4400 +[2025-09-05 18:58:42] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 18:58:42] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 18:58:42] [Rank 0] Group 7 FTA: 0.3000 +[2025-09-05 18:58:42] [Rank 0] Group 7 FTA: 0.3000 +[2025-09-05 18:58:42] [Rank 0] Group 8 FTA: 0.3200 +[2025-09-05 18:58:42] [Rank 0] Group 8 FTA: 0.3200 +[2025-09-05 18:58:42] [Rank 0] Group 9 FTA: 0.2400 +[2025-09-05 18:58:42] [Rank 0] Group 9 FTA: 0.2400 +[2025-09-05 18:58:42] [Rank 0] Group 10 FTA: 0.3000 +[2025-09-05 18:58:42] [Rank 0] Group 10 FTA: 0.3000 +[2025-09-05 18:58:42] [Rank 0] Group 11 FTA: 0.2700 +[2025-09-05 18:58:42] [Rank 0] Group 11 FTA: 0.2700 +[2025-09-05 18:58:42] [Rank 0] Group 12 FTA: 0.2400 +[2025-09-05 18:58:42] [Rank 0] Group 12 FTA: 0.2400 +[2025-09-05 18:58:42] [Rank 0] Group 13 FTA: 0.1700 +[2025-09-05 18:58:42] [Rank 0] Group 13 FTA: 0.1700 +[2025-09-05 18:58:42] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 18:58:42] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 18:58:42] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 18:58:42] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 18:58:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:58:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 18:58:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:58:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 18:58:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:58:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 18:58:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:58:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 18:58:43] [Rank 0] step:5501/10000 train_time:238564ms step_avg:43.37ms +[2025-09-05 18:58:43] [Rank 0] step:5501/10000 train_time:238564ms step_avg:43.37ms +[2025-09-05 18:58:44] [Rank 0] step:5521/10000 train_time:239235ms step_avg:43.33ms +[2025-09-05 18:58:44] [Rank 0] step:5521/10000 train_time:239235ms step_avg:43.33ms +[2025-09-05 18:58:44] [Rank 0] step:5541/10000 train_time:239972ms step_avg:43.31ms +[2025-09-05 18:58:44] [Rank 0] step:5541/10000 train_time:239972ms step_avg:43.31ms +[2025-09-05 18:58:45] [Rank 0] step:5561/10000 train_time:240709ms step_avg:43.29ms +[2025-09-05 18:58:45] [Rank 0] step:5561/10000 train_time:240709ms step_avg:43.29ms +[2025-09-05 18:58:46] [Rank 0] step:5581/10000 train_time:241446ms step_avg:43.26ms +[2025-09-05 18:58:46] [Rank 0] step:5581/10000 train_time:241446ms step_avg:43.26ms +[2025-09-05 18:58:47] [Rank 0] step:5601/10000 train_time:242182ms step_avg:43.24ms +[2025-09-05 18:58:47] [Rank 0] step:5601/10000 train_time:242182ms step_avg:43.24ms +[2025-09-05 18:58:47] [Rank 0] step:5621/10000 train_time:242919ms step_avg:43.22ms +[2025-09-05 18:58:47] 
[Rank 0] step:5621/10000 train_time:242919ms step_avg:43.22ms +[2025-09-05 18:58:49] [Rank 0] step:5641/10000 train_time:244254ms step_avg:43.30ms +[2025-09-05 18:58:49] [Rank 0] step:5641/10000 train_time:244254ms step_avg:43.30ms +[2025-09-05 18:58:50] [Rank 0] step:5661/10000 train_time:244990ms step_avg:43.28ms +[2025-09-05 18:58:50] [Rank 0] step:5661/10000 train_time:244990ms step_avg:43.28ms +[2025-09-05 18:58:50] [Rank 0] step:5681/10000 train_time:245726ms step_avg:43.25ms +[2025-09-05 18:58:50] [Rank 0] step:5681/10000 train_time:245726ms step_avg:43.25ms +[2025-09-05 18:58:51] [Rank 0] step:5701/10000 train_time:246463ms step_avg:43.23ms +[2025-09-05 18:58:51] [Rank 0] step:5701/10000 train_time:246463ms step_avg:43.23ms +[2025-09-05 18:58:52] [Rank 0] step:5721/10000 train_time:247200ms step_avg:43.21ms +[2025-09-05 18:58:52] [Rank 0] step:5721/10000 train_time:247200ms step_avg:43.21ms +[2025-09-05 18:58:52] [Rank 0] step:5741/10000 train_time:247936ms step_avg:43.19ms +[2025-09-05 18:58:52] [Rank 0] step:5741/10000 train_time:247936ms step_avg:43.19ms +[2025-09-05 18:58:53] [Rank 0] step:5761/10000 train_time:248673ms step_avg:43.16ms +[2025-09-05 18:58:53] [Rank 0] step:5761/10000 train_time:248673ms step_avg:43.16ms +[2025-09-05 18:58:54] [Rank 0] step:5781/10000 train_time:249409ms step_avg:43.14ms +[2025-09-05 18:58:54] [Rank 0] step:5781/10000 train_time:249409ms step_avg:43.14ms +[2025-09-05 18:58:55] [Rank 0] step:5801/10000 train_time:250146ms step_avg:43.12ms +[2025-09-05 18:58:55] [Rank 0] step:5801/10000 train_time:250146ms step_avg:43.12ms +[2025-09-05 18:58:55] [Rank 0] step:5821/10000 train_time:250883ms step_avg:43.10ms +[2025-09-05 18:58:55] [Rank 0] step:5821/10000 train_time:250883ms step_avg:43.10ms +[2025-09-05 18:58:56] [Rank 0] step:5841/10000 train_time:251619ms step_avg:43.08ms +[2025-09-05 18:58:56] [Rank 0] step:5841/10000 train_time:251619ms step_avg:43.08ms +[2025-09-05 18:58:57] [Rank 0] step:5861/10000 train_time:252355ms step_avg:43.06ms +[2025-09-05 18:58:57] [Rank 0] step:5861/10000 train_time:252355ms step_avg:43.06ms +[2025-09-05 18:58:58] [Rank 0] step:5881/10000 train_time:253092ms step_avg:43.04ms +[2025-09-05 18:58:58] [Rank 0] step:5881/10000 train_time:253092ms step_avg:43.04ms +[2025-09-05 18:58:58] [Rank 0] step:5901/10000 train_time:253829ms step_avg:43.01ms +[2025-09-05 18:58:58] [Rank 0] step:5901/10000 train_time:253829ms step_avg:43.01ms +[2025-09-05 18:58:59] [Rank 0] step:5921/10000 train_time:254566ms step_avg:42.99ms +[2025-09-05 18:58:59] [Rank 0] step:5921/10000 train_time:254566ms step_avg:42.99ms +[2025-09-05 18:59:00] [Rank 0] step:5941/10000 train_time:255304ms step_avg:42.97ms +[2025-09-05 18:59:00] [Rank 0] step:5941/10000 train_time:255304ms step_avg:42.97ms +[2025-09-05 18:59:01] [Rank 0] step:5961/10000 train_time:256040ms step_avg:42.95ms +[2025-09-05 18:59:01] [Rank 0] step:5961/10000 train_time:256040ms step_avg:42.95ms +[2025-09-05 18:59:01] [Rank 0] step:5981/10000 train_time:256776ms step_avg:42.93ms +[2025-09-05 18:59:01] [Rank 0] step:5981/10000 train_time:256776ms step_avg:42.93ms +[2025-09-05 18:59:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 18:59:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 18:59:02] [Rank 0] PRINT: step:6000/10000 train_loss:1.7103 val_loss:1.6931 train_time:257594ms step_avg:42.93ms +[2025-09-05 18:59:02] [Rank 0] PRINT: step:6000/10000 train_loss:1.7103 val_loss:1.6931 train_time:257594ms step_avg:42.93ms +[2025-09-05 18:59:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:59:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 18:59:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 18:59:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:00:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:00:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:00:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:00:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:00:23] [Rank 0] Total Loss: 4.2196 +[2025-09-05 19:00:23] [Rank 0] Total Loss: 4.2196 +[2025-09-05 19:00:23] [Rank 0] Total FTA (Unweighted): 0.4413 +[2025-09-05 19:00:23] [Rank 0] Total FTA (Unweighted): 0.4413 +[2025-09-05 19:00:23] [Rank 0] Total FTA (Weighted): 0.4412 +[2025-09-05 19:00:23] [Rank 0] Total FTA (Weighted): 0.4412 +[2025-09-05 19:00:23] [Rank 0] Group 0 Loss: 3.2583 +[2025-09-05 19:00:23] [Rank 0] Group 0 Loss: 3.2583 +[2025-09-05 19:00:23] [Rank 0] Group 1 Loss: 3.1191 +[2025-09-05 19:00:23] [Rank 0] Group 1 Loss: 3.1191 +[2025-09-05 19:00:23] [Rank 0] Group 2 Loss: 3.1403 +[2025-09-05 19:00:23] [Rank 0] Group 2 Loss: 3.1403 +[2025-09-05 19:00:23] [Rank 0] Group 3 Loss: 3.4101 +[2025-09-05 19:00:23] [Rank 0] Group 3 Loss: 3.4101 +[2025-09-05 19:00:23] [Rank 0] Group 4 Loss: 3.6234 +[2025-09-05 19:00:23] [Rank 0] Group 4 Loss: 3.6234 +[2025-09-05 19:00:23] [Rank 0] Group 5 Loss: 3.9101 +[2025-09-05 19:00:23] [Rank 0] Group 5 Loss: 3.9101 +[2025-09-05 19:00:23] [Rank 0] Group 6 Loss: 4.1605 +[2025-09-05 19:00:23] [Rank 0] Group 6 Loss: 4.1605 +[2025-09-05 19:00:23] [Rank 0] Group 7 Loss: 4.3471 +[2025-09-05 19:00:23] [Rank 0] Group 7 Loss: 4.3471 +[2025-09-05 19:00:23] [Rank 0] Group 8 Loss: 4.5949 +[2025-09-05 19:00:23] [Rank 0] Group 8 Loss: 4.5949 +[2025-09-05 19:00:23] [Rank 0] Group 9 Loss: 4.7813 +[2025-09-05 19:00:23] [Rank 0] Group 9 Loss: 4.7813 +[2025-09-05 19:00:23] [Rank 0] Group 10 Loss: 4.8908 +[2025-09-05 19:00:23] [Rank 0] Group 10 Loss: 4.8908 +[2025-09-05 19:00:23] [Rank 0] Group 11 Loss: 4.8476 +[2025-09-05 19:00:23] [Rank 0] Group 11 Loss: 4.8476 +[2025-09-05 19:00:23] [Rank 0] Group 12 Loss: 4.8240 +[2025-09-05 19:00:23] [Rank 0] Group 12 Loss: 4.8240 +[2025-09-05 19:00:23] [Rank 0] Group 13 Loss: 4.8702 +[2025-09-05 19:00:23] [Rank 0] Group 13 Loss: 4.8702 +[2025-09-05 19:00:23] [Rank 0] Group 14 Loss: 4.9021 +[2025-09-05 19:00:23] [Rank 0] Group 14 Loss: 4.9021 +[2025-09-05 19:00:23] [Rank 0] Group 15 Loss: 4.8343 +[2025-09-05 19:00:23] [Rank 0] Group 15 Loss: 4.8343 +[2025-09-05 19:00:23] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:00:23] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:00:24] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:00:24] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:00:24] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:00:24] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:00:24] [Rank 0] Group 3 FTA: 0.5400 +[2025-09-05 19:00:24] [Rank 0] Group 3 FTA: 0.5400 +[2025-09-05 19:00:24] [Rank 0] Group 4 FTA: 0.4000 +[2025-09-05 19:00:24] [Rank 0] Group 4 FTA: 0.4000 +[2025-09-05 19:00:24] [Rank 0] Group 5 FTA: 0.4600 +[2025-09-05 19:00:24] [Rank 0] Group 5 FTA: 
0.4600 +[2025-09-05 19:00:24] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 19:00:24] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 19:00:24] [Rank 0] Group 7 FTA: 0.3100 +[2025-09-05 19:00:24] [Rank 0] Group 7 FTA: 0.3100 +[2025-09-05 19:00:24] [Rank 0] Group 8 FTA: 0.3300 +[2025-09-05 19:00:24] [Rank 0] Group 8 FTA: 0.3300 +[2025-09-05 19:00:24] [Rank 0] Group 9 FTA: 0.2300 +[2025-09-05 19:00:24] [Rank 0] Group 9 FTA: 0.2300 +[2025-09-05 19:00:24] [Rank 0] Group 10 FTA: 0.3300 +[2025-09-05 19:00:24] [Rank 0] Group 10 FTA: 0.3300 +[2025-09-05 19:00:24] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 19:00:24] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 19:00:24] [Rank 0] Group 12 FTA: 0.2600 +[2025-09-05 19:00:24] [Rank 0] Group 12 FTA: 0.2600 +[2025-09-05 19:00:24] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 19:00:24] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 19:00:24] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 19:00:24] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 19:00:24] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:00:24] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 19:00:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 19:00:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 19:00:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 19:00:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 19:00:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 19:00:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 19:00:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 19:00:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 19:00:25] [Rank 0] step:6001/10000 train_time:257604ms step_avg:42.93ms +[2025-09-05 19:00:25] [Rank 0] step:6001/10000 train_time:257604ms step_avg:42.93ms +[2025-09-05 19:00:26] [Rank 0] step:6021/10000 train_time:258902ms step_avg:43.00ms +[2025-09-05 19:00:26] [Rank 0] step:6021/10000 train_time:258902ms step_avg:43.00ms +[2025-09-05 19:00:27] [Rank 0] step:6041/10000 train_time:259639ms step_avg:42.98ms +[2025-09-05 19:00:27] [Rank 0] step:6041/10000 train_time:259639ms step_avg:42.98ms +[2025-09-05 19:00:28] [Rank 0] step:6061/10000 train_time:260376ms step_avg:42.96ms +[2025-09-05 19:00:28] [Rank 0] step:6061/10000 train_time:260376ms step_avg:42.96ms +[2025-09-05 19:00:29] [Rank 0] step:6081/10000 train_time:261112ms step_avg:42.94ms +[2025-09-05 19:00:29] [Rank 0] step:6081/10000 train_time:261112ms step_avg:42.94ms +[2025-09-05 19:00:29] [Rank 0] step:6101/10000 train_time:261848ms step_avg:42.92ms +[2025-09-05 19:00:29] [Rank 0] step:6101/10000 train_time:261848ms step_avg:42.92ms +[2025-09-05 19:00:30] [Rank 0] step:6121/10000 train_time:262585ms step_avg:42.90ms +[2025-09-05 19:00:30] 
[Rank 0] step:6121/10000 train_time:262585ms step_avg:42.90ms +[2025-09-05 19:00:31] [Rank 0] step:6141/10000 train_time:263321ms step_avg:42.88ms +[2025-09-05 19:00:31] [Rank 0] step:6141/10000 train_time:263321ms step_avg:42.88ms +[2025-09-05 19:00:31] [Rank 0] step:6161/10000 train_time:264058ms step_avg:42.86ms +[2025-09-05 19:00:31] [Rank 0] step:6161/10000 train_time:264058ms step_avg:42.86ms +[2025-09-05 19:00:32] [Rank 0] step:6181/10000 train_time:264794ms step_avg:42.84ms +[2025-09-05 19:00:32] [Rank 0] step:6181/10000 train_time:264794ms step_avg:42.84ms +[2025-09-05 19:00:33] [Rank 0] step:6201/10000 train_time:265531ms step_avg:42.82ms +[2025-09-05 19:00:33] [Rank 0] step:6201/10000 train_time:265531ms step_avg:42.82ms +[2025-09-05 19:00:34] [Rank 0] step:6221/10000 train_time:266268ms step_avg:42.80ms +[2025-09-05 19:00:34] [Rank 0] step:6221/10000 train_time:266268ms step_avg:42.80ms +[2025-09-05 19:00:35] [Rank 0] step:6241/10000 train_time:267005ms step_avg:42.78ms +[2025-09-05 19:00:35] [Rank 0] step:6241/10000 train_time:267005ms step_avg:42.78ms +[2025-09-05 19:00:35] [Rank 0] step:6261/10000 train_time:267882ms step_avg:42.79ms +[2025-09-05 19:00:35] [Rank 0] step:6261/10000 train_time:267882ms step_avg:42.79ms +[2025-09-05 19:00:36] [Rank 0] step:6281/10000 train_time:268619ms step_avg:42.77ms +[2025-09-05 19:00:36] [Rank 0] step:6281/10000 train_time:268619ms step_avg:42.77ms +[2025-09-05 19:00:37] [Rank 0] step:6301/10000 train_time:269355ms step_avg:42.75ms +[2025-09-05 19:00:37] [Rank 0] step:6301/10000 train_time:269355ms step_avg:42.75ms +[2025-09-05 19:00:38] [Rank 0] step:6321/10000 train_time:270289ms step_avg:42.76ms +[2025-09-05 19:00:38] [Rank 0] step:6321/10000 train_time:270289ms step_avg:42.76ms +[2025-09-05 19:00:38] [Rank 0] step:6341/10000 train_time:271026ms step_avg:42.74ms +[2025-09-05 19:00:38] [Rank 0] step:6341/10000 train_time:271026ms step_avg:42.74ms +[2025-09-05 19:00:39] [Rank 0] step:6361/10000 train_time:271763ms step_avg:42.72ms +[2025-09-05 19:00:39] [Rank 0] step:6361/10000 train_time:271763ms step_avg:42.72ms +[2025-09-05 19:00:40] [Rank 0] step:6381/10000 train_time:272499ms step_avg:42.70ms +[2025-09-05 19:00:40] [Rank 0] step:6381/10000 train_time:272499ms step_avg:42.70ms +[2025-09-05 19:00:41] [Rank 0] step:6401/10000 train_time:273236ms step_avg:42.69ms +[2025-09-05 19:00:41] [Rank 0] step:6401/10000 train_time:273236ms step_avg:42.69ms +[2025-09-05 19:00:41] [Rank 0] step:6421/10000 train_time:273973ms step_avg:42.67ms +[2025-09-05 19:00:41] [Rank 0] step:6421/10000 train_time:273973ms step_avg:42.67ms +[2025-09-05 19:00:42] [Rank 0] step:6441/10000 train_time:274710ms step_avg:42.65ms +[2025-09-05 19:00:42] [Rank 0] step:6441/10000 train_time:274710ms step_avg:42.65ms +[2025-09-05 19:00:43] [Rank 0] step:6461/10000 train_time:275447ms step_avg:42.63ms +[2025-09-05 19:00:43] [Rank 0] step:6461/10000 train_time:275447ms step_avg:42.63ms +[2025-09-05 19:00:44] [Rank 0] step:6481/10000 train_time:276183ms step_avg:42.61ms +[2025-09-05 19:00:44] [Rank 0] step:6481/10000 train_time:276183ms step_avg:42.61ms +[2025-09-05 19:00:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:00:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:00:45] [Rank 0] PRINT: step:6500/10000 train_loss:1.6976 val_loss:1.6816 train_time:277001ms step_avg:42.62ms +[2025-09-05 19:00:45] [Rank 0] PRINT: step:6500/10000 train_loss:1.6976 val_loss:1.6816 train_time:277001ms step_avg:42.62ms +[2025-09-05 19:00:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:00:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:00:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:00:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:02:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:02:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:02:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:02:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:02:06] [Rank 0] Total Loss: 4.1987 +[2025-09-05 19:02:06] [Rank 0] Total Loss: 4.1987 +[2025-09-05 19:02:06] [Rank 0] Total FTA (Unweighted): 0.4512 +[2025-09-05 19:02:06] [Rank 0] Total FTA (Unweighted): 0.4512 +[2025-09-05 19:02:06] [Rank 0] Total FTA (Weighted): 0.4512 +[2025-09-05 19:02:06] [Rank 0] Total FTA (Weighted): 0.4512 +[2025-09-05 19:02:06] [Rank 0] Group 0 Loss: 3.1766 +[2025-09-05 19:02:06] [Rank 0] Group 0 Loss: 3.1766 +[2025-09-05 19:02:06] [Rank 0] Group 1 Loss: 3.0444 +[2025-09-05 19:02:06] [Rank 0] Group 1 Loss: 3.0444 +[2025-09-05 19:02:06] [Rank 0] Group 2 Loss: 3.1535 +[2025-09-05 19:02:06] [Rank 0] Group 2 Loss: 3.1535 +[2025-09-05 19:02:06] [Rank 0] Group 3 Loss: 3.4072 +[2025-09-05 19:02:06] [Rank 0] Group 3 Loss: 3.4072 +[2025-09-05 19:02:06] [Rank 0] Group 4 Loss: 3.6034 +[2025-09-05 19:02:06] [Rank 0] Group 4 Loss: 3.6034 +[2025-09-05 19:02:06] [Rank 0] Group 5 Loss: 3.8814 +[2025-09-05 19:02:06] [Rank 0] Group 5 Loss: 3.8814 +[2025-09-05 19:02:06] [Rank 0] Group 6 Loss: 4.1542 +[2025-09-05 19:02:06] [Rank 0] Group 6 Loss: 4.1542 +[2025-09-05 19:02:06] [Rank 0] Group 7 Loss: 4.3195 +[2025-09-05 19:02:06] [Rank 0] Group 7 Loss: 4.3195 +[2025-09-05 19:02:06] [Rank 0] Group 8 Loss: 4.5986 +[2025-09-05 19:02:06] [Rank 0] Group 8 Loss: 4.5986 +[2025-09-05 19:02:06] [Rank 0] Group 9 Loss: 4.7591 +[2025-09-05 19:02:06] [Rank 0] Group 9 Loss: 4.7591 +[2025-09-05 19:02:06] [Rank 0] Group 10 Loss: 4.8709 +[2025-09-05 19:02:06] [Rank 0] Group 10 Loss: 4.8709 +[2025-09-05 19:02:06] [Rank 0] Group 11 Loss: 4.8472 +[2025-09-05 19:02:06] [Rank 0] Group 11 Loss: 4.8472 +[2025-09-05 19:02:06] [Rank 0] Group 12 Loss: 4.8033 +[2025-09-05 19:02:06] [Rank 0] Group 12 Loss: 4.8033 +[2025-09-05 19:02:06] [Rank 0] Group 13 Loss: 4.8532 +[2025-09-05 19:02:06] [Rank 0] Group 13 Loss: 4.8532 +[2025-09-05 19:02:06] [Rank 0] Group 14 Loss: 4.8789 +[2025-09-05 19:02:06] [Rank 0] Group 14 Loss: 4.8789 +[2025-09-05 19:02:06] [Rank 0] Group 15 Loss: 4.8280 +[2025-09-05 19:02:06] [Rank 0] Group 15 Loss: 4.8280 +[2025-09-05 19:02:06] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:02:06] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:02:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:02:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:02:07] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:02:07] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:02:07] [Rank 0] Group 3 FTA: 0.5800 +[2025-09-05 19:02:07] [Rank 0] Group 3 FTA: 0.5800 +[2025-09-05 19:02:07] [Rank 0] Group 4 FTA: 0.4600 +[2025-09-05 19:02:07] [Rank 0] Group 4 FTA: 0.4600 +[2025-09-05 19:02:07] [Rank 0] Group 5 FTA: 0.4900 +[2025-09-05 19:02:07] [Rank 0] Group 5 FTA: 
0.4900 +[2025-09-05 19:02:07] [Rank 0] Group 6 FTA: 0.4100 +[2025-09-05 19:02:07] [Rank 0] Group 6 FTA: 0.4100 +[2025-09-05 19:02:07] [Rank 0] Group 7 FTA: 0.3300 +[2025-09-05 19:02:07] [Rank 0] Group 7 FTA: 0.3300 +[2025-09-05 19:02:07] [Rank 0] Group 8 FTA: 0.3500 +[2025-09-05 19:02:07] [Rank 0] Group 8 FTA: 0.3500 +[2025-09-05 19:02:07] [Rank 0] Group 9 FTA: 0.2800 +[2025-09-05 19:02:07] [Rank 0] Group 9 FTA: 0.2800 +[2025-09-05 19:02:07] [Rank 0] Group 10 FTA: 0.3400 +[2025-09-05 19:02:07] [Rank 0] Group 10 FTA: 0.3400 +[2025-09-05 19:02:07] [Rank 0] Group 11 FTA: 0.2900 +[2025-09-05 19:02:07] [Rank 0] Group 11 FTA: 0.2900 +[2025-09-05 19:02:07] [Rank 0] Group 12 FTA: 0.2200 +[2025-09-05 19:02:07] [Rank 0] Group 12 FTA: 0.2200 +[2025-09-05 19:02:07] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-05 19:02:07] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-05 19:02:07] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 19:02:07] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 19:02:07] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 19:02:07] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 19:02:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 19:02:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 19:02:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 19:02:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 19:02:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 19:02:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 19:02:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 19:02:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 19:02:08] [Rank 0] step:6501/10000 train_time:277011ms step_avg:42.61ms +[2025-09-05 19:02:08] [Rank 0] step:6501/10000 train_time:277011ms step_avg:42.61ms +[2025-09-05 19:02:09] [Rank 0] step:6521/10000 train_time:277684ms step_avg:42.58ms +[2025-09-05 19:02:09] [Rank 0] step:6521/10000 train_time:277684ms step_avg:42.58ms +[2025-09-05 19:02:09] [Rank 0] step:6541/10000 train_time:278421ms step_avg:42.57ms +[2025-09-05 19:02:09] [Rank 0] step:6541/10000 train_time:278421ms step_avg:42.57ms +[2025-09-05 19:02:10] [Rank 0] step:6561/10000 train_time:279157ms step_avg:42.55ms +[2025-09-05 19:02:10] [Rank 0] step:6561/10000 train_time:279157ms step_avg:42.55ms +[2025-09-05 19:02:11] [Rank 0] step:6581/10000 train_time:279893ms step_avg:42.53ms +[2025-09-05 19:02:11] [Rank 0] step:6581/10000 train_time:279893ms step_avg:42.53ms +[2025-09-05 19:02:12] [Rank 0] step:6601/10000 train_time:280630ms step_avg:42.51ms +[2025-09-05 19:02:12] [Rank 0] step:6601/10000 train_time:280630ms step_avg:42.51ms +[2025-09-05 19:02:12] [Rank 0] step:6621/10000 train_time:281367ms step_avg:42.50ms +[2025-09-05 19:02:12] 
[Rank 0] step:6621/10000 train_time:281367ms step_avg:42.50ms +[2025-09-05 19:02:13] [Rank 0] step:6641/10000 train_time:282104ms step_avg:42.48ms +[2025-09-05 19:02:13] [Rank 0] step:6641/10000 train_time:282104ms step_avg:42.48ms +[2025-09-05 19:02:14] [Rank 0] step:6661/10000 train_time:282841ms step_avg:42.46ms +[2025-09-05 19:02:14] [Rank 0] step:6661/10000 train_time:282841ms step_avg:42.46ms +[2025-09-05 19:02:15] [Rank 0] step:6681/10000 train_time:283577ms step_avg:42.45ms +[2025-09-05 19:02:15] [Rank 0] step:6681/10000 train_time:283577ms step_avg:42.45ms +[2025-09-05 19:02:15] [Rank 0] step:6701/10000 train_time:284313ms step_avg:42.43ms +[2025-09-05 19:02:15] [Rank 0] step:6701/10000 train_time:284313ms step_avg:42.43ms +[2025-09-05 19:02:16] [Rank 0] step:6721/10000 train_time:285050ms step_avg:42.41ms +[2025-09-05 19:02:16] [Rank 0] step:6721/10000 train_time:285050ms step_avg:42.41ms +[2025-09-05 19:02:17] [Rank 0] step:6741/10000 train_time:285787ms step_avg:42.40ms +[2025-09-05 19:02:17] [Rank 0] step:6741/10000 train_time:285787ms step_avg:42.40ms +[2025-09-05 19:02:18] [Rank 0] step:6761/10000 train_time:286524ms step_avg:42.38ms +[2025-09-05 19:02:18] [Rank 0] step:6761/10000 train_time:286524ms step_avg:42.38ms +[2025-09-05 19:02:18] [Rank 0] step:6781/10000 train_time:287262ms step_avg:42.36ms +[2025-09-05 19:02:18] [Rank 0] step:6781/10000 train_time:287262ms step_avg:42.36ms +[2025-09-05 19:02:19] [Rank 0] step:6801/10000 train_time:288001ms step_avg:42.35ms +[2025-09-05 19:02:19] [Rank 0] step:6801/10000 train_time:288001ms step_avg:42.35ms +[2025-09-05 19:02:20] [Rank 0] step:6821/10000 train_time:288738ms step_avg:42.33ms +[2025-09-05 19:02:20] [Rank 0] step:6821/10000 train_time:288738ms step_avg:42.33ms +[2025-09-05 19:02:21] [Rank 0] step:6841/10000 train_time:289672ms step_avg:42.34ms +[2025-09-05 19:02:21] [Rank 0] step:6841/10000 train_time:289672ms step_avg:42.34ms +[2025-09-05 19:02:21] [Rank 0] step:6861/10000 train_time:290409ms step_avg:42.33ms +[2025-09-05 19:02:21] [Rank 0] step:6861/10000 train_time:290409ms step_avg:42.33ms +[2025-09-05 19:02:22] [Rank 0] step:6881/10000 train_time:291146ms step_avg:42.31ms +[2025-09-05 19:02:22] [Rank 0] step:6881/10000 train_time:291146ms step_avg:42.31ms +[2025-09-05 19:02:23] [Rank 0] step:6901/10000 train_time:291883ms step_avg:42.30ms +[2025-09-05 19:02:23] [Rank 0] step:6901/10000 train_time:291883ms step_avg:42.30ms +[2025-09-05 19:02:24] [Rank 0] step:6921/10000 train_time:292619ms step_avg:42.28ms +[2025-09-05 19:02:24] [Rank 0] step:6921/10000 train_time:292619ms step_avg:42.28ms +[2025-09-05 19:02:24] [Rank 0] step:6941/10000 train_time:293356ms step_avg:42.26ms +[2025-09-05 19:02:24] [Rank 0] step:6941/10000 train_time:293356ms step_avg:42.26ms +[2025-09-05 19:02:25] [Rank 0] step:6961/10000 train_time:294093ms step_avg:42.25ms +[2025-09-05 19:02:25] [Rank 0] step:6961/10000 train_time:294093ms step_avg:42.25ms +[2025-09-05 19:02:26] [Rank 0] step:6981/10000 train_time:294830ms step_avg:42.23ms +[2025-09-05 19:02:26] [Rank 0] step:6981/10000 train_time:294830ms step_avg:42.23ms +[2025-09-05 19:02:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:02:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:02:27] [Rank 0] PRINT: step:7000/10000 train_loss:1.6859 val_loss:1.6726 train_time:295648ms step_avg:42.24ms +[2025-09-05 19:02:27] [Rank 0] PRINT: step:7000/10000 train_loss:1.6859 val_loss:1.6726 train_time:295648ms step_avg:42.24ms +[2025-09-05 19:02:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:02:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:02:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:02:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:03:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:03:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:03:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:03:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:03:49] [Rank 0] Total Loss: 4.1550 +[2025-09-05 19:03:49] [Rank 0] Total Loss: 4.1550 +[2025-09-05 19:03:49] [Rank 0] Total FTA (Unweighted): 0.4513 +[2025-09-05 19:03:49] [Rank 0] Total FTA (Unweighted): 0.4513 +[2025-09-05 19:03:49] [Rank 0] Total FTA (Weighted): 0.4512 +[2025-09-05 19:03:49] [Rank 0] Total FTA (Weighted): 0.4512 +[2025-09-05 19:03:49] [Rank 0] Group 0 Loss: 3.1954 +[2025-09-05 19:03:49] [Rank 0] Group 0 Loss: 3.1954 +[2025-09-05 19:03:49] [Rank 0] Group 1 Loss: 3.0575 +[2025-09-05 19:03:49] [Rank 0] Group 1 Loss: 3.0575 +[2025-09-05 19:03:49] [Rank 0] Group 2 Loss: 3.0340 +[2025-09-05 19:03:49] [Rank 0] Group 2 Loss: 3.0340 +[2025-09-05 19:03:49] [Rank 0] Group 3 Loss: 3.3576 +[2025-09-05 19:03:49] [Rank 0] Group 3 Loss: 3.3576 +[2025-09-05 19:03:49] [Rank 0] Group 4 Loss: 3.5921 +[2025-09-05 19:03:49] [Rank 0] Group 4 Loss: 3.5921 +[2025-09-05 19:03:49] [Rank 0] Group 5 Loss: 3.8195 +[2025-09-05 19:03:49] [Rank 0] Group 5 Loss: 3.8195 +[2025-09-05 19:03:49] [Rank 0] Group 6 Loss: 4.0631 +[2025-09-05 19:03:49] [Rank 0] Group 6 Loss: 4.0631 +[2025-09-05 19:03:49] [Rank 0] Group 7 Loss: 4.2837 +[2025-09-05 19:03:49] [Rank 0] Group 7 Loss: 4.2837 +[2025-09-05 19:03:49] [Rank 0] Group 8 Loss: 4.5565 +[2025-09-05 19:03:49] [Rank 0] Group 8 Loss: 4.5565 +[2025-09-05 19:03:49] [Rank 0] Group 9 Loss: 4.6956 +[2025-09-05 19:03:49] [Rank 0] Group 9 Loss: 4.6956 +[2025-09-05 19:03:49] [Rank 0] Group 10 Loss: 4.7925 +[2025-09-05 19:03:49] [Rank 0] Group 10 Loss: 4.7925 +[2025-09-05 19:03:49] [Rank 0] Group 11 Loss: 4.8081 +[2025-09-05 19:03:49] [Rank 0] Group 11 Loss: 4.8081 +[2025-09-05 19:03:49] [Rank 0] Group 12 Loss: 4.7733 +[2025-09-05 19:03:49] [Rank 0] Group 12 Loss: 4.7733 +[2025-09-05 19:03:49] [Rank 0] Group 13 Loss: 4.8096 +[2025-09-05 19:03:49] [Rank 0] Group 13 Loss: 4.8096 +[2025-09-05 19:03:49] [Rank 0] Group 14 Loss: 4.8523 +[2025-09-05 19:03:49] [Rank 0] Group 14 Loss: 4.8523 +[2025-09-05 19:03:49] [Rank 0] Group 15 Loss: 4.7891 +[2025-09-05 19:03:49] [Rank 0] Group 15 Loss: 4.7891 +[2025-09-05 19:03:49] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:03:49] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:03:49] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:03:49] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:03:49] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:03:49] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:03:49] [Rank 0] Group 3 FTA: 0.6200 +[2025-09-05 19:03:49] [Rank 0] Group 3 FTA: 0.6200 +[2025-09-05 19:03:49] [Rank 0] Group 4 FTA: 0.4600 +[2025-09-05 19:03:49] [Rank 0] Group 4 FTA: 0.4600 +[2025-09-05 19:03:49] [Rank 0] Group 5 FTA: 0.5000 +[2025-09-05 19:03:49] [Rank 0] Group 5 FTA: 
0.5000 +[2025-09-05 19:03:49] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 19:03:49] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 19:03:49] [Rank 0] Group 7 FTA: 0.3300 +[2025-09-05 19:03:49] [Rank 0] Group 7 FTA: 0.3300 +[2025-09-05 19:03:49] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 19:03:49] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 19:03:49] [Rank 0] Group 9 FTA: 0.2400 +[2025-09-05 19:03:49] [Rank 0] Group 9 FTA: 0.2400 +[2025-09-05 19:03:49] [Rank 0] Group 10 FTA: 0.3500 +[2025-09-05 19:03:49] [Rank 0] Group 10 FTA: 0.3500 +[2025-09-05 19:03:49] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 19:03:49] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 19:03:49] [Rank 0] Group 12 FTA: 0.2300 +[2025-09-05 19:03:49] [Rank 0] Group 12 FTA: 0.2300 +[2025-09-05 19:03:49] [Rank 0] Group 13 FTA: 0.2200 +[2025-09-05 19:03:49] [Rank 0] Group 13 FTA: 0.2200 +[2025-09-05 19:03:49] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 19:03:49] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 19:03:49] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 19:03:49] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 19:03:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 19:03:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 19:03:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 19:03:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 19:03:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 19:03:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 19:03:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 19:03:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 19:03:50] [Rank 0] step:7001/10000 train_time:295658ms step_avg:42.23ms +[2025-09-05 19:03:50] [Rank 0] step:7001/10000 train_time:295658ms step_avg:42.23ms +[2025-09-05 19:03:51] [Rank 0] step:7021/10000 train_time:296337ms step_avg:42.21ms +[2025-09-05 19:03:51] [Rank 0] step:7021/10000 train_time:296337ms step_avg:42.21ms +[2025-09-05 19:03:52] [Rank 0] step:7041/10000 train_time:297073ms step_avg:42.19ms +[2025-09-05 19:03:52] [Rank 0] step:7041/10000 train_time:297073ms step_avg:42.19ms +[2025-09-05 19:03:53] [Rank 0] step:7061/10000 train_time:297809ms step_avg:42.18ms +[2025-09-05 19:03:53] [Rank 0] step:7061/10000 train_time:297809ms step_avg:42.18ms +[2025-09-05 19:03:53] [Rank 0] step:7081/10000 train_time:298546ms step_avg:42.16ms +[2025-09-05 19:03:53] [Rank 0] step:7081/10000 train_time:298546ms step_avg:42.16ms +[2025-09-05 19:03:54] [Rank 0] step:7101/10000 train_time:299282ms step_avg:42.15ms +[2025-09-05 19:03:54] [Rank 0] step:7101/10000 train_time:299282ms step_avg:42.15ms +[2025-09-05 19:03:55] [Rank 0] step:7121/10000 train_time:300019ms step_avg:42.13ms +[2025-09-05 19:03:55] 
[Rank 0] step:7121/10000 train_time:300019ms step_avg:42.13ms +[2025-09-05 19:03:55] [Rank 0] step:7141/10000 train_time:300756ms step_avg:42.12ms +[2025-09-05 19:03:55] [Rank 0] step:7141/10000 train_time:300756ms step_avg:42.12ms +[2025-09-05 19:03:56] [Rank 0] step:7161/10000 train_time:301493ms step_avg:42.10ms +[2025-09-05 19:03:56] [Rank 0] step:7161/10000 train_time:301493ms step_avg:42.10ms +[2025-09-05 19:03:57] [Rank 0] step:7181/10000 train_time:302230ms step_avg:42.09ms +[2025-09-05 19:03:57] [Rank 0] step:7181/10000 train_time:302230ms step_avg:42.09ms +[2025-09-05 19:03:58] [Rank 0] step:7201/10000 train_time:302967ms step_avg:42.07ms +[2025-09-05 19:03:58] [Rank 0] step:7201/10000 train_time:302967ms step_avg:42.07ms +[2025-09-05 19:03:58] [Rank 0] step:7221/10000 train_time:303704ms step_avg:42.06ms +[2025-09-05 19:03:58] [Rank 0] step:7221/10000 train_time:303704ms step_avg:42.06ms +[2025-09-05 19:03:59] [Rank 0] step:7241/10000 train_time:304442ms step_avg:42.04ms +[2025-09-05 19:03:59] [Rank 0] step:7241/10000 train_time:304442ms step_avg:42.04ms +[2025-09-05 19:04:00] [Rank 0] step:7261/10000 train_time:305180ms step_avg:42.03ms +[2025-09-05 19:04:00] [Rank 0] step:7261/10000 train_time:305180ms step_avg:42.03ms +[2025-09-05 19:04:01] [Rank 0] step:7281/10000 train_time:305916ms step_avg:42.02ms +[2025-09-05 19:04:01] [Rank 0] step:7281/10000 train_time:305916ms step_avg:42.02ms +[2025-09-05 19:04:01] [Rank 0] step:7301/10000 train_time:306654ms step_avg:42.00ms +[2025-09-05 19:04:01] [Rank 0] step:7301/10000 train_time:306654ms step_avg:42.00ms +[2025-09-05 19:04:02] [Rank 0] step:7321/10000 train_time:307391ms step_avg:41.99ms +[2025-09-05 19:04:02] [Rank 0] step:7321/10000 train_time:307391ms step_avg:41.99ms +[2025-09-05 19:04:03] [Rank 0] step:7341/10000 train_time:308127ms step_avg:41.97ms +[2025-09-05 19:04:03] [Rank 0] step:7341/10000 train_time:308127ms step_avg:41.97ms +[2025-09-05 19:04:04] [Rank 0] step:7361/10000 train_time:308864ms step_avg:41.96ms +[2025-09-05 19:04:04] [Rank 0] step:7361/10000 train_time:308864ms step_avg:41.96ms +[2025-09-05 19:04:04] [Rank 0] step:7381/10000 train_time:309601ms step_avg:41.95ms +[2025-09-05 19:04:04] [Rank 0] step:7381/10000 train_time:309601ms step_avg:41.95ms +[2025-09-05 19:04:05] [Rank 0] step:7401/10000 train_time:310338ms step_avg:41.93ms +[2025-09-05 19:04:05] [Rank 0] step:7401/10000 train_time:310338ms step_avg:41.93ms +[2025-09-05 19:04:06] [Rank 0] step:7421/10000 train_time:311075ms step_avg:41.92ms +[2025-09-05 19:04:06] [Rank 0] step:7421/10000 train_time:311075ms step_avg:41.92ms +[2025-09-05 19:04:07] [Rank 0] step:7441/10000 train_time:311811ms step_avg:41.90ms +[2025-09-05 19:04:07] [Rank 0] step:7441/10000 train_time:311811ms step_avg:41.90ms +[2025-09-05 19:04:07] [Rank 0] step:7461/10000 train_time:312549ms step_avg:41.89ms +[2025-09-05 19:04:07] [Rank 0] step:7461/10000 train_time:312549ms step_avg:41.89ms +[2025-09-05 19:04:08] [Rank 0] step:7481/10000 train_time:313285ms step_avg:41.88ms +[2025-09-05 19:04:08] [Rank 0] step:7481/10000 train_time:313285ms step_avg:41.88ms +[2025-09-05 19:04:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:04:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:04:09] [Rank 0] PRINT: step:7500/10000 train_loss:1.6757 val_loss:1.6638 train_time:314102ms step_avg:41.88ms +[2025-09-05 19:04:09] [Rank 0] PRINT: step:7500/10000 train_loss:1.6757 val_loss:1.6638 train_time:314102ms step_avg:41.88ms +[2025-09-05 19:04:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:04:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:04:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:04:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:05:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:05:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:05:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:05:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:05:31] [Rank 0] Total Loss: 4.1819 +[2025-09-05 19:05:31] [Rank 0] Total Loss: 4.1819 +[2025-09-05 19:05:31] [Rank 0] Total FTA (Unweighted): 0.4662 +[2025-09-05 19:05:31] [Rank 0] Total FTA (Unweighted): 0.4662 +[2025-09-05 19:05:31] [Rank 0] Total FTA (Weighted): 0.4662 +[2025-09-05 19:05:31] [Rank 0] Total FTA (Weighted): 0.4662 +[2025-09-05 19:05:31] [Rank 0] Group 0 Loss: 3.1552 +[2025-09-05 19:05:31] [Rank 0] Group 0 Loss: 3.1552 +[2025-09-05 19:05:31] [Rank 0] Group 1 Loss: 3.1239 +[2025-09-05 19:05:31] [Rank 0] Group 1 Loss: 3.1239 +[2025-09-05 19:05:31] [Rank 0] Group 2 Loss: 3.0814 +[2025-09-05 19:05:31] [Rank 0] Group 2 Loss: 3.0814 +[2025-09-05 19:05:31] [Rank 0] Group 3 Loss: 3.4113 +[2025-09-05 19:05:31] [Rank 0] Group 3 Loss: 3.4113 +[2025-09-05 19:05:31] [Rank 0] Group 4 Loss: 3.6183 +[2025-09-05 19:05:31] [Rank 0] Group 4 Loss: 3.6183 +[2025-09-05 19:05:31] [Rank 0] Group 5 Loss: 3.8364 +[2025-09-05 19:05:31] [Rank 0] Group 5 Loss: 3.8364 +[2025-09-05 19:05:31] [Rank 0] Group 6 Loss: 4.0919 +[2025-09-05 19:05:31] [Rank 0] Group 6 Loss: 4.0919 +[2025-09-05 19:05:31] [Rank 0] Group 7 Loss: 4.3254 +[2025-09-05 19:05:31] [Rank 0] Group 7 Loss: 4.3254 +[2025-09-05 19:05:31] [Rank 0] Group 8 Loss: 4.5839 +[2025-09-05 19:05:31] [Rank 0] Group 8 Loss: 4.5839 +[2025-09-05 19:05:31] [Rank 0] Group 9 Loss: 4.7174 +[2025-09-05 19:05:31] [Rank 0] Group 9 Loss: 4.7174 +[2025-09-05 19:05:31] [Rank 0] Group 10 Loss: 4.8512 +[2025-09-05 19:05:31] [Rank 0] Group 10 Loss: 4.8512 +[2025-09-05 19:05:31] [Rank 0] Group 11 Loss: 4.8356 +[2025-09-05 19:05:31] [Rank 0] Group 11 Loss: 4.8356 +[2025-09-05 19:05:31] [Rank 0] Group 12 Loss: 4.7785 +[2025-09-05 19:05:31] [Rank 0] Group 12 Loss: 4.7785 +[2025-09-05 19:05:31] [Rank 0] Group 13 Loss: 4.8194 +[2025-09-05 19:05:31] [Rank 0] Group 13 Loss: 4.8194 +[2025-09-05 19:05:31] [Rank 0] Group 14 Loss: 4.8580 +[2025-09-05 19:05:31] [Rank 0] Group 14 Loss: 4.8580 +[2025-09-05 19:05:31] [Rank 0] Group 15 Loss: 4.8231 +[2025-09-05 19:05:31] [Rank 0] Group 15 Loss: 4.8231 +[2025-09-05 19:05:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:05:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:05:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:05:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:05:31] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:05:31] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:05:31] [Rank 0] Group 3 FTA: 0.7200 +[2025-09-05 19:05:31] [Rank 0] Group 3 FTA: 0.7200 +[2025-09-05 19:05:31] [Rank 0] Group 4 FTA: 0.4700 +[2025-09-05 19:05:31] [Rank 0] Group 4 FTA: 0.4700 +[2025-09-05 19:05:31] [Rank 0] Group 5 FTA: 0.4900 +[2025-09-05 19:05:31] [Rank 0] Group 5 FTA: 
0.4900 +[2025-09-05 19:05:31] [Rank 0] Group 6 FTA: 0.4100 +[2025-09-05 19:05:31] [Rank 0] Group 6 FTA: 0.4100 +[2025-09-05 19:05:31] [Rank 0] Group 7 FTA: 0.3300 +[2025-09-05 19:05:31] [Rank 0] Group 7 FTA: 0.3300 +[2025-09-05 19:05:31] [Rank 0] Group 8 FTA: 0.3700 +[2025-09-05 19:05:31] [Rank 0] Group 8 FTA: 0.3700 +[2025-09-05 19:05:31] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 19:05:31] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 19:05:31] [Rank 0] Group 10 FTA: 0.3800 +[2025-09-05 19:05:31] [Rank 0] Group 10 FTA: 0.3800 +[2025-09-05 19:05:31] [Rank 0] Group 11 FTA: 0.2900 +[2025-09-05 19:05:31] [Rank 0] Group 11 FTA: 0.2900 +[2025-09-05 19:05:31] [Rank 0] Group 12 FTA: 0.2900 +[2025-09-05 19:05:31] [Rank 0] Group 12 FTA: 0.2900 +[2025-09-05 19:05:31] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-05 19:05:31] [Rank 0] Group 13 FTA: 0.2300 +[2025-09-05 19:05:31] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 19:05:31] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 19:05:31] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 19:05:31] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 19:05:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 19:05:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png +[2025-09-05 19:05:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 19:05:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png +[2025-09-05 19:05:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 19:05:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png +[2025-09-05 19:05:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 19:05:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png +[2025-09-05 19:05:32] [Rank 0] step:7501/10000 train_time:314110ms step_avg:41.88ms +[2025-09-05 19:05:32] [Rank 0] step:7501/10000 train_time:314110ms step_avg:41.88ms +[2025-09-05 19:05:33] [Rank 0] step:7521/10000 train_time:314772ms step_avg:41.85ms +[2025-09-05 19:05:33] [Rank 0] step:7521/10000 train_time:314772ms step_avg:41.85ms +[2025-09-05 19:05:34] [Rank 0] step:7541/10000 train_time:315509ms step_avg:41.84ms +[2025-09-05 19:05:34] [Rank 0] step:7541/10000 train_time:315509ms step_avg:41.84ms +[2025-09-05 19:05:34] [Rank 0] step:7561/10000 train_time:316245ms step_avg:41.83ms +[2025-09-05 19:05:34] [Rank 0] step:7561/10000 train_time:316245ms step_avg:41.83ms +[2025-09-05 19:05:35] [Rank 0] step:7581/10000 train_time:316981ms step_avg:41.81ms +[2025-09-05 19:05:35] [Rank 0] step:7581/10000 train_time:316981ms step_avg:41.81ms +[2025-09-05 19:05:36] [Rank 0] step:7601/10000 train_time:317718ms step_avg:41.80ms +[2025-09-05 19:05:36] [Rank 0] step:7601/10000 train_time:317718ms step_avg:41.80ms +[2025-09-05 19:05:37] [Rank 0] step:7621/10000 train_time:318455ms step_avg:41.79ms +[2025-09-05 19:05:37] 
[Rank 0] step:7621/10000 train_time:318455ms step_avg:41.79ms +[2025-09-05 19:05:38] [Rank 0] step:7641/10000 train_time:319211ms step_avg:41.78ms +[2025-09-05 19:05:38] [Rank 0] step:7641/10000 train_time:319211ms step_avg:41.78ms +[2025-09-05 19:05:39] [Rank 0] step:7661/10000 train_time:320539ms step_avg:41.84ms +[2025-09-05 19:05:39] [Rank 0] step:7661/10000 train_time:320539ms step_avg:41.84ms +[2025-09-05 19:05:39] [Rank 0] step:7681/10000 train_time:321275ms step_avg:41.83ms +[2025-09-05 19:05:39] [Rank 0] step:7681/10000 train_time:321275ms step_avg:41.83ms +[2025-09-05 19:05:40] [Rank 0] step:7701/10000 train_time:322012ms step_avg:41.81ms +[2025-09-05 19:05:40] [Rank 0] step:7701/10000 train_time:322012ms step_avg:41.81ms +[2025-09-05 19:05:41] [Rank 0] step:7721/10000 train_time:322748ms step_avg:41.80ms +[2025-09-05 19:05:41] [Rank 0] step:7721/10000 train_time:322748ms step_avg:41.80ms +[2025-09-05 19:05:42] [Rank 0] step:7741/10000 train_time:323485ms step_avg:41.79ms +[2025-09-05 19:05:42] [Rank 0] step:7741/10000 train_time:323485ms step_avg:41.79ms +[2025-09-05 19:05:42] [Rank 0] step:7761/10000 train_time:324221ms step_avg:41.78ms +[2025-09-05 19:05:42] [Rank 0] step:7761/10000 train_time:324221ms step_avg:41.78ms +[2025-09-05 19:05:43] [Rank 0] step:7781/10000 train_time:324957ms step_avg:41.76ms +[2025-09-05 19:05:43] [Rank 0] step:7781/10000 train_time:324957ms step_avg:41.76ms +[2025-09-05 19:05:44] [Rank 0] step:7801/10000 train_time:325693ms step_avg:41.75ms +[2025-09-05 19:05:44] [Rank 0] step:7801/10000 train_time:325693ms step_avg:41.75ms +[2025-09-05 19:05:45] [Rank 0] step:7821/10000 train_time:326430ms step_avg:41.74ms +[2025-09-05 19:05:45] [Rank 0] step:7821/10000 train_time:326430ms step_avg:41.74ms +[2025-09-05 19:05:45] [Rank 0] step:7841/10000 train_time:327166ms step_avg:41.73ms +[2025-09-05 19:05:45] [Rank 0] step:7841/10000 train_time:327166ms step_avg:41.73ms +[2025-09-05 19:05:46] [Rank 0] step:7861/10000 train_time:327902ms step_avg:41.71ms +[2025-09-05 19:05:46] [Rank 0] step:7861/10000 train_time:327902ms step_avg:41.71ms +[2025-09-05 19:05:47] [Rank 0] step:7881/10000 train_time:328639ms step_avg:41.70ms +[2025-09-05 19:05:47] [Rank 0] step:7881/10000 train_time:328639ms step_avg:41.70ms +[2025-09-05 19:05:47] [Rank 0] step:7901/10000 train_time:329375ms step_avg:41.69ms +[2025-09-05 19:05:47] [Rank 0] step:7901/10000 train_time:329375ms step_avg:41.69ms +[2025-09-05 19:05:48] [Rank 0] step:7921/10000 train_time:330112ms step_avg:41.68ms +[2025-09-05 19:05:48] [Rank 0] step:7921/10000 train_time:330112ms step_avg:41.68ms +[2025-09-05 19:05:49] [Rank 0] step:7941/10000 train_time:330848ms step_avg:41.66ms +[2025-09-05 19:05:49] [Rank 0] step:7941/10000 train_time:330848ms step_avg:41.66ms +[2025-09-05 19:05:50] [Rank 0] step:7961/10000 train_time:331584ms step_avg:41.65ms +[2025-09-05 19:05:50] [Rank 0] step:7961/10000 train_time:331584ms step_avg:41.65ms +[2025-09-05 19:05:50] [Rank 0] step:7981/10000 train_time:332321ms step_avg:41.64ms +[2025-09-05 19:05:50] [Rank 0] step:7981/10000 train_time:332321ms step_avg:41.64ms +[2025-09-05 19:05:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:05:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 19:05:52] [Rank 0] PRINT: step:8000/10000 train_loss:1.6696 val_loss:1.6567 train_time:333247ms step_avg:41.66ms +[2025-09-05 19:05:52] [Rank 0] PRINT: step:8000/10000 train_loss:1.6696 val_loss:1.6567 train_time:333247ms step_avg:41.66ms +[2025-09-05 19:05:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:05:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:05:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:05:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:07:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:07:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:07:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:07:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:07:13] [Rank 0] Total Loss: 4.2586 +[2025-09-05 19:07:13] [Rank 0] Total Loss: 4.2586 +[2025-09-05 19:07:13] [Rank 0] Total FTA (Unweighted): 0.4775 +[2025-09-05 19:07:13] [Rank 0] Total FTA (Unweighted): 0.4775 +[2025-09-05 19:07:13] [Rank 0] Total FTA (Weighted): 0.4775 +[2025-09-05 19:07:13] [Rank 0] Total FTA (Weighted): 0.4775 +[2025-09-05 19:07:13] [Rank 0] Group 0 Loss: 3.2310 +[2025-09-05 19:07:13] [Rank 0] Group 0 Loss: 3.2310 +[2025-09-05 19:07:13] [Rank 0] Group 1 Loss: 3.1728 +[2025-09-05 19:07:13] [Rank 0] Group 1 Loss: 3.1728 +[2025-09-05 19:07:13] [Rank 0] Group 2 Loss: 3.1883 +[2025-09-05 19:07:13] [Rank 0] Group 2 Loss: 3.1883 +[2025-09-05 19:07:13] [Rank 0] Group 3 Loss: 3.4994 +[2025-09-05 19:07:13] [Rank 0] Group 3 Loss: 3.4994 +[2025-09-05 19:07:13] [Rank 0] Group 4 Loss: 3.6867 +[2025-09-05 19:07:13] [Rank 0] Group 4 Loss: 3.6867 +[2025-09-05 19:07:13] [Rank 0] Group 5 Loss: 3.9360 +[2025-09-05 19:07:13] [Rank 0] Group 5 Loss: 3.9360 +[2025-09-05 19:07:13] [Rank 0] Group 6 Loss: 4.1937 +[2025-09-05 19:07:13] [Rank 0] Group 6 Loss: 4.1937 +[2025-09-05 19:07:13] [Rank 0] Group 7 Loss: 4.4076 +[2025-09-05 19:07:13] [Rank 0] Group 7 Loss: 4.4076 +[2025-09-05 19:07:13] [Rank 0] Group 8 Loss: 4.6469 +[2025-09-05 19:07:13] [Rank 0] Group 8 Loss: 4.6469 +[2025-09-05 19:07:13] [Rank 0] Group 9 Loss: 4.7926 +[2025-09-05 19:07:13] [Rank 0] Group 9 Loss: 4.7926 +[2025-09-05 19:07:13] [Rank 0] Group 10 Loss: 4.9011 +[2025-09-05 19:07:13] [Rank 0] Group 10 Loss: 4.9011 +[2025-09-05 19:07:13] [Rank 0] Group 11 Loss: 4.9165 +[2025-09-05 19:07:13] [Rank 0] Group 11 Loss: 4.9165 +[2025-09-05 19:07:13] [Rank 0] Group 12 Loss: 4.8738 +[2025-09-05 19:07:13] [Rank 0] Group 12 Loss: 4.8738 +[2025-09-05 19:07:13] [Rank 0] Group 13 Loss: 4.9012 +[2025-09-05 19:07:13] [Rank 0] Group 13 Loss: 4.9012 +[2025-09-05 19:07:13] [Rank 0] Group 14 Loss: 4.9176 +[2025-09-05 19:07:13] [Rank 0] Group 14 Loss: 4.9176 +[2025-09-05 19:07:13] [Rank 0] Group 15 Loss: 4.8731 +[2025-09-05 19:07:13] [Rank 0] Group 15 Loss: 4.8731 +[2025-09-05 19:07:13] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:07:13] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:07:13] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:07:13] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:07:13] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:07:13] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:07:13] [Rank 0] Group 3 FTA: 0.7700 +[2025-09-05 19:07:13] [Rank 0] Group 3 FTA: 0.7700 +[2025-09-05 19:07:13] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 19:07:13] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 19:07:13] [Rank 0] Group 5 FTA: 0.5200 +[2025-09-05 19:07:13] [Rank 0] Group 5 FTA: 
0.5200
+[2025-09-05 19:07:13] [Rank 0] Group 6 FTA: 0.4200
+[2025-09-05 19:07:13] [Rank 0] Group 7 FTA: 0.3400
+[2025-09-05 19:07:13] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 19:07:13] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 19:07:13] [Rank 0] Group 10 FTA: 0.3600
+[2025-09-05 19:07:13] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 19:07:13] [Rank 0] Group 12 FTA: 0.3000
+[2025-09-05 19:07:13] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-05 19:07:13] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 19:07:13] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:07:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 19:07:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 19:07:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 19:07:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 19:07:14] [Rank 0] step:8001/10000 train_time:333256ms step_avg:41.65ms
+[2025-09-05 19:07:16] [Rank 0] step:8021/10000 train_time:334549ms step_avg:41.71ms
+[2025-09-05 19:07:17] [Rank 0] step:8041/10000 train_time:335286ms step_avg:41.70ms
+[2025-09-05 19:07:17] [Rank 0] step:8061/10000 train_time:336022ms step_avg:41.68ms
+[2025-09-05 19:07:18] [Rank 0] step:8081/10000 train_time:336759ms step_avg:41.67ms
+[2025-09-05 19:07:19] [Rank 0] step:8101/10000 train_time:337495ms step_avg:41.66ms
+[2025-09-05 19:07:19] [Rank 0] step:8121/10000 train_time:338232ms step_avg:41.65ms
+[2025-09-05 19:07:20] [Rank 0] step:8141/10000 train_time:338969ms step_avg:41.64ms
+[2025-09-05 19:07:21] [Rank 0] step:8161/10000 train_time:339705ms step_avg:41.63ms
+[2025-09-05 19:07:22] [Rank 0] step:8181/10000 train_time:340442ms step_avg:41.61ms
+[2025-09-05 19:07:22] [Rank 0] step:8201/10000 train_time:341179ms step_avg:41.60ms
+[2025-09-05 19:07:23] [Rank 0] step:8221/10000 train_time:341916ms step_avg:41.59ms
+[2025-09-05 19:07:24] [Rank 0] step:8241/10000 train_time:342653ms step_avg:41.58ms
+[2025-09-05 19:07:25] [Rank 0] step:8261/10000 train_time:343389ms step_avg:41.57ms
+[2025-09-05 19:07:25] [Rank 0] step:8281/10000 train_time:344126ms step_avg:41.56ms
+[2025-09-05 19:07:26] [Rank 0] step:8301/10000 train_time:344863ms step_avg:41.54ms
+[2025-09-05 19:07:27] [Rank 0] step:8321/10000 train_time:345599ms step_avg:41.53ms
+[2025-09-05 19:07:28] [Rank 0] step:8341/10000 train_time:346335ms step_avg:41.52ms
+[2025-09-05 19:07:28] [Rank 0] step:8361/10000 train_time:347072ms step_avg:41.51ms
+[2025-09-05 19:07:29] [Rank 0] step:8381/10000 train_time:347808ms step_avg:41.50ms
+[2025-09-05 19:07:30] [Rank 0] step:8401/10000 train_time:348545ms step_avg:41.49ms
+[2025-09-05 19:07:31] [Rank 0] step:8421/10000 train_time:349282ms step_avg:41.48ms
+[2025-09-05 19:07:31] [Rank 0] step:8441/10000 train_time:350019ms step_avg:41.47ms
+[2025-09-05 19:07:32] [Rank 0] step:8461/10000 train_time:350755ms step_avg:41.46ms
+[2025-09-05 19:07:33] [Rank 0] step:8481/10000 train_time:351492ms step_avg:41.44ms
+[2025-09-05 19:07:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:07:34] [Rank 0] PRINT: step:8500/10000 train_loss:1.6625 val_loss:1.6511 train_time:352309ms step_avg:41.45ms
+[2025-09-05 19:07:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:07:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:08:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:08:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:08:55] [Rank 0] Total Loss: 4.2380
+[2025-09-05 19:08:55] [Rank 0] Total FTA (Unweighted): 0.4831
+[2025-09-05 19:08:55] [Rank 0] Total FTA (Weighted): 0.4831
+[2025-09-05 19:08:55] [Rank 0] Group 0 Loss: 3.2045
+[2025-09-05 19:08:55] [Rank 0] Group 1 Loss: 3.1473
+[2025-09-05 19:08:55] [Rank 0] Group 2 Loss: 3.1307
+[2025-09-05 19:08:55] [Rank 0] Group 3 Loss: 3.5083
+[2025-09-05 19:08:55] [Rank 0] Group 4 Loss: 3.6682
+[2025-09-05 19:08:55] [Rank 0] Group 5 Loss: 3.9242
+[2025-09-05 19:08:55] [Rank 0] Group 6 Loss: 4.1712
+[2025-09-05 19:08:55] [Rank 0] Group 7 Loss: 4.3682
+[2025-09-05 19:08:55] [Rank 0] Group 8 Loss: 4.6254
+[2025-09-05 19:08:55] [Rank 0] Group 9 Loss: 4.7718
+[2025-09-05 19:08:55] [Rank 0] Group 10 Loss: 4.8734
+[2025-09-05 19:08:55] [Rank 0] Group 11 Loss: 4.8903
+[2025-09-05 19:08:55] [Rank 0] Group 12 Loss: 4.8511
+[2025-09-05 19:08:55] [Rank 0] Group 13 Loss: 4.8895
+[2025-09-05 19:08:55] [Rank 0] Group 14 Loss: 4.9323
+[2025-09-05 19:08:55] [Rank 0] Group 15 Loss: 4.8509
+[2025-09-05 19:08:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:08:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:08:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:08:55] [Rank 0] Group 3 FTA: 0.7700
+[2025-09-05 19:08:55] [Rank 0] Group 4 FTA: 0.4600
+[2025-09-05 19:08:55] [Rank 0] Group 5 FTA: 0.5000
+[2025-09-05 19:08:55] [Rank 0] Group 6 FTA: 0.4100
+[2025-09-05 19:08:55] [Rank 0] Group 7 FTA: 0.3500
+[2025-09-05 19:08:55] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 19:08:55] [Rank 0] Group 9 FTA: 0.2900
+[2025-09-05 19:08:55] [Rank 0] Group 10 FTA: 0.3700
+[2025-09-05 19:08:55] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 19:08:55] [Rank 0] Group 12 FTA: 0.2900
+[2025-09-05 19:08:55] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-05 19:08:55] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 19:08:55] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 19:08:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 19:08:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 19:08:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 19:08:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 19:08:57] [Rank 0] step:8501/10000 train_time:352318ms step_avg:41.44ms
+[2025-09-05 19:08:57] [Rank 0] step:8521/10000 train_time:352982ms step_avg:41.42ms
+[2025-09-05 19:08:58] [Rank 0] step:8541/10000 train_time:353718ms step_avg:41.41ms
+[2025-09-05 19:08:59] [Rank 0] step:8561/10000 train_time:354455ms step_avg:41.40ms
+[2025-09-05 19:09:00] [Rank 0] step:8581/10000 train_time:355191ms step_avg:41.39ms
+[2025-09-05 19:09:01] [Rank 0] step:8601/10000 train_time:356061ms step_avg:41.40ms
+[2025-09-05 19:09:01] [Rank 0] step:8621/10000 train_time:356797ms step_avg:41.39ms
+[2025-09-05 19:09:02] [Rank 0] step:8641/10000 train_time:357534ms step_avg:41.38ms
+[2025-09-05 19:09:03] [Rank 0] step:8661/10000 train_time:358395ms step_avg:41.38ms
+[2025-09-05 19:09:04] [Rank 0] step:8681/10000 train_time:359139ms step_avg:41.37ms
+[2025-09-05 19:09:04] [Rank 0] step:8701/10000 train_time:359875ms step_avg:41.36ms
+[2025-09-05 19:09:05] [Rank 0] step:8721/10000 train_time:360611ms step_avg:41.35ms
+[2025-09-05 19:09:06] [Rank 0] step:8741/10000 train_time:361349ms step_avg:41.34ms
+[2025-09-05 19:09:07] [Rank 0] step:8761/10000 train_time:362085ms step_avg:41.33ms
+[2025-09-05 19:09:07] [Rank 0] step:8781/10000 train_time:362822ms step_avg:41.32ms
+[2025-09-05 19:09:08] [Rank 0] step:8801/10000 train_time:363559ms step_avg:41.31ms
+[2025-09-05 19:09:09] [Rank 0] step:8821/10000 train_time:364296ms step_avg:41.30ms
+[2025-09-05 19:09:10] [Rank 0] step:8841/10000 train_time:365657ms step_avg:41.36ms
+[2025-09-05 19:09:11] [Rank 0] step:8861/10000 train_time:366394ms step_avg:41.35ms
+[2025-09-05 19:09:12] [Rank 0] step:8881/10000 train_time:367130ms step_avg:41.34ms
+[2025-09-05 19:09:12] [Rank 0] step:8901/10000 train_time:367866ms step_avg:41.33ms
+[2025-09-05 19:09:13] [Rank 0] step:8921/10000 train_time:368602ms step_avg:41.32ms
+[2025-09-05 19:09:14] [Rank 0] step:8941/10000 train_time:369340ms step_avg:41.31ms
+[2025-09-05 19:09:15] [Rank 0] step:8961/10000 train_time:370076ms step_avg:41.30ms
+[2025-09-05 19:09:15] [Rank 0] step:8981/10000 train_time:370812ms step_avg:41.29ms
+[2025-09-05 19:09:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:09:16] [Rank 0] PRINT: step:9000/10000 train_loss:1.6560 val_loss:1.6445 train_time:371629ms step_avg:41.29ms
+[2025-09-05 19:09:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:09:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:10:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:10:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:10:38] [Rank 0] Total Loss: 4.2181
+[2025-09-05 19:10:38] [Rank 0] Total FTA (Unweighted): 0.4988
+[2025-09-05 19:10:38] [Rank 0] Total FTA (Weighted): 0.4988
+[2025-09-05 19:10:38] [Rank 0] Group 0 Loss: 3.1998
+[2025-09-05 19:10:38] [Rank 0] Group 1 Loss: 3.1375
+[2025-09-05 19:10:38] [Rank 0] Group 2 Loss: 3.1335
+[2025-09-05 19:10:38] [Rank 0] Group 3 Loss: 3.4697
+[2025-09-05 19:10:38] [Rank 0] Group 4 Loss: 3.6457
+[2025-09-05 19:10:38] [Rank 0] Group 5 Loss: 3.8943
+[2025-09-05 19:10:38] [Rank 0] Group 6 Loss: 4.1506
+[2025-09-05 19:10:38] [Rank 0] Group 7 Loss: 4.3412
+[2025-09-05 19:10:38] [Rank 0] Group 8 Loss: 4.6167
+[2025-09-05 19:10:38] [Rank 0] Group 9 Loss: 4.7509
+[2025-09-05 19:10:38] [Rank 0] Group 10 Loss: 4.8528
+[2025-09-05 19:10:38] [Rank 0] Group 11 Loss: 4.8626
+[2025-09-05 19:10:38] [Rank 0] Group 12 Loss: 4.8347
+[2025-09-05 19:10:38] [Rank 0] Group 13 Loss: 4.8728
+[2025-09-05 19:10:38] [Rank 0] Group 14 Loss: 4.9107
+[2025-09-05 19:10:38] [Rank 0] Group 15 Loss: 4.8166
+[2025-09-05 19:10:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:10:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:10:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:10:38] [Rank 0] Group 3 FTA: 0.8900
+[2025-09-05 19:10:38] [Rank 0] Group 4 FTA: 0.5100
+[2025-09-05 19:10:38] [Rank 0] Group 5 FTA: 0.5200
+[2025-09-05 19:10:39] [Rank 0] Group 6 FTA: 0.4200
+[2025-09-05 19:10:39] [Rank 0] Group 7 FTA: 0.3600
+[2025-09-05 19:10:39] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 19:10:39] [Rank 0] Group 9 FTA: 0.2900
+[2025-09-05 19:10:39] [Rank 0] Group 10 FTA: 0.3800
+[2025-09-05 19:10:39] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 19:10:39] [Rank 0] Group 12 FTA: 0.3700
+[2025-09-05 19:10:39] [Rank 0] Group 13 FTA: 0.3000
+[2025-09-05 19:10:39] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 19:10:39] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 19:10:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 19:10:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 19:10:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 19:10:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 19:10:40] [Rank 0] step:9001/10000 train_time:371638ms step_avg:41.29ms
+[2025-09-05 19:10:41] [Rank 0] step:9021/10000 train_time:372322ms step_avg:41.27ms
+[2025-09-05 19:10:41] [Rank 0] step:9041/10000 train_time:373058ms step_avg:41.26ms
+[2025-09-05 19:10:42] [Rank 0] step:9061/10000 train_time:373795ms step_avg:41.25ms
+[2025-09-05 19:10:43] [Rank 0] step:9081/10000 train_time:374532ms step_avg:41.24ms
+[2025-09-05 19:10:44] [Rank 0] step:9101/10000 train_time:375269ms step_avg:41.23ms
+[2025-09-05 19:10:44] [Rank 0] step:9121/10000 train_time:376006ms step_avg:41.22ms
+[2025-09-05 19:10:45] [Rank 0] step:9141/10000 train_time:376742ms step_avg:41.21ms
+[2025-09-05 19:10:46] [Rank 0] step:9161/10000 train_time:377479ms step_avg:41.21ms
+[2025-09-05 19:10:47] [Rank 0] step:9181/10000 train_time:378216ms step_avg:41.20ms
+[2025-09-05 19:10:47] [Rank 0] step:9201/10000 train_time:378953ms step_avg:41.19ms
+[2025-09-05 19:10:48] [Rank 0] step:9221/10000 train_time:379690ms step_avg:41.18ms
+[2025-09-05 19:10:49] [Rank 0] step:9241/10000 train_time:380426ms step_avg:41.17ms
+[2025-09-05 19:10:50] [Rank 0] step:9261/10000 train_time:381163ms step_avg:41.16ms
+[2025-09-05 19:10:50] [Rank 0] step:9281/10000 train_time:381899ms step_avg:41.15ms
+[2025-09-05 19:10:51] [Rank 0] step:9301/10000 train_time:382636ms step_avg:41.14ms
+[2025-09-05 19:10:52] [Rank 0] step:9321/10000 train_time:383373ms step_avg:41.13ms
+[2025-09-05 19:10:52] [Rank 0] step:9341/10000 train_time:384109ms step_avg:41.12ms
+[2025-09-05 19:10:53] [Rank 0] step:9361/10000 train_time:384846ms step_avg:41.11ms
+[2025-09-05 19:10:54] [Rank 0] step:9381/10000 train_time:385582ms step_avg:41.10ms
+[2025-09-05 19:10:55] [Rank 0] step:9401/10000 train_time:386319ms step_avg:41.09ms
+[2025-09-05 19:10:55] [Rank 0] step:9421/10000 train_time:387056ms step_avg:41.08ms
+[2025-09-05 19:10:56] [Rank 0] step:9441/10000 train_time:387792ms step_avg:41.08ms
+[2025-09-05 19:10:57] [Rank 0] step:9461/10000 train_time:388529ms step_avg:41.07ms
+[2025-09-05 19:10:58] [Rank 0] step:9481/10000 train_time:389264ms step_avg:41.06ms
+[2025-09-05 19:10:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:10:59] [Rank 0] PRINT: step:9500/10000 train_loss:1.6495 val_loss:1.6393 train_time:390082ms step_avg:41.06ms
+[2025-09-05 19:10:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:10:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:12:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:12:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:12:20] [Rank 0] Total Loss: 4.1868
+[2025-09-05 19:12:20] [Rank 0] Total FTA (Unweighted): 0.4988
+[2025-09-05 19:12:20] [Rank 0] Total FTA (Weighted): 0.4988
+[2025-09-05 19:12:20] [Rank 0] Group 0 Loss: 3.2081
+[2025-09-05 19:12:20] [Rank 0] Group 1 Loss: 3.1273
+[2025-09-05 19:12:20] [Rank 0] Group 2 Loss: 3.1085
+[2025-09-05 19:12:20] [Rank 0] Group 3 Loss: 3.4476
+[2025-09-05 19:12:20] [Rank 0] Group 4 Loss: 3.6111
+[2025-09-05 19:12:20] [Rank 0] Group 5 Loss: 3.8489
+[2025-09-05 19:12:20] [Rank 0] Group 6 Loss: 4.1014
+[2025-09-05 19:12:20] [Rank 0] Group 7 Loss: 4.2982
+[2025-09-05 19:12:20] [Rank 0] Group 8 Loss: 4.5745
+[2025-09-05 19:12:20] [Rank 0] Group 9 Loss: 4.6995
+[2025-09-05 19:12:20] [Rank 0] Group 10 Loss: 4.8164
+[2025-09-05 19:12:20] [Rank 0] Group 11 Loss: 4.8262
+[2025-09-05 19:12:20] [Rank 0] Group 12 Loss: 4.7972
+[2025-09-05 19:12:20] [Rank 0] Group 13 Loss: 4.8457
+[2025-09-05 19:12:20] [Rank 0] Group 14 Loss: 4.8771
+[2025-09-05 19:12:20] [Rank 0] Group 15 Loss: 4.8014
+[2025-09-05 19:12:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:12:20] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:12:20] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:12:20] [Rank 0] Group 3 FTA: 0.8600
+[2025-09-05 19:12:20] [Rank 0] Group 4 FTA: 0.5200
+[2025-09-05 19:12:20] [Rank 0] Group 5 FTA: 0.5000
+[2025-09-05 19:12:20] [Rank 0] Group 6 FTA: 0.4300
+[2025-09-05 19:12:20] [Rank 0] Group 7 FTA: 0.3600
+[2025-09-05 19:12:20] [Rank 0] Group 8 FTA: 0.3700
+[2025-09-05 19:12:20] [Rank 0] Group 9 FTA: 0.2900
+[2025-09-05 19:12:20] [Rank 0] Group 10 FTA: 0.3700
+[2025-09-05 19:12:20] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 19:12:20] [Rank 0] Group 12 FTA: 0.3200
+[2025-09-05 19:12:20] [Rank 0] Group 13 FTA: 0.3300
+[2025-09-05 19:12:20] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-05 19:12:20] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 19:12:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 19:12:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 19:12:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 19:12:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 19:12:21] [Rank 0] step:9501/10000 train_time:390091ms step_avg:41.06ms
+[2025-09-05 19:12:22] [Rank 0] step:9521/10000 train_time:390765ms step_avg:41.04ms
+[2025-09-05 19:12:23] [Rank 0] step:9541/10000 train_time:391501ms step_avg:41.03ms
+[2025-09-05 19:12:24] [Rank 0] step:9561/10000 train_time:392238ms step_avg:41.02ms
+[2025-09-05 19:12:24] [Rank 0] step:9581/10000 train_time:392975ms step_avg:41.02ms
+[2025-09-05 19:12:25] [Rank 0] step:9601/10000 train_time:393711ms step_avg:41.01ms
+[2025-09-05 19:12:26] [Rank 0] step:9621/10000 train_time:394448ms step_avg:41.00ms
+[2025-09-05 19:12:26] [Rank 0] step:9641/10000 train_time:395185ms step_avg:40.99ms
+[2025-09-05 19:12:27] [Rank 0] step:9661/10000 train_time:396201ms step_avg:41.01ms
+[2025-09-05 19:12:28] [Rank 0] step:9681/10000 train_time:396939ms step_avg:41.00ms
+[2025-09-05 19:12:29] [Rank 0] step:9701/10000 train_time:397677ms step_avg:40.99ms
+[2025-09-05 19:12:30] [Rank 0] step:9721/10000 train_time:398413ms step_avg:40.98ms
+[2025-09-05 19:12:30] [Rank 0] step:9741/10000 train_time:399150ms step_avg:40.98ms
+[2025-09-05 19:12:31] [Rank 0] step:9761/10000 train_time:399886ms step_avg:40.97ms
+[2025-09-05 19:12:32] [Rank 0] step:9781/10000 train_time:400623ms step_avg:40.96ms
+[2025-09-05 19:12:33] [Rank 0] step:9801/10000 train_time:401360ms step_avg:40.95ms
+[2025-09-05 19:12:33] [Rank 0] step:9821/10000 train_time:402096ms step_avg:40.94ms
+[2025-09-05 19:12:34] [Rank 0] step:9841/10000 train_time:402936ms step_avg:40.94ms
+[2025-09-05 19:12:35] [Rank 0] step:9861/10000 train_time:403674ms step_avg:40.94ms
+[2025-09-05 19:12:36] [Rank 0] step:9881/10000 train_time:404410ms step_avg:40.93ms
+[2025-09-05 19:12:36] [Rank 0] step:9901/10000 train_time:405146ms step_avg:40.92ms
+[2025-09-05 19:12:37] [Rank 0] step:9921/10000 train_time:405883ms step_avg:40.91ms
+[2025-09-05 19:12:38] [Rank 0] step:9941/10000 train_time:406619ms step_avg:40.90ms
+[2025-09-05 19:12:39] [Rank 0] step:9961/10000 train_time:407355ms step_avg:40.90ms
+[2025-09-05 19:12:39] [Rank 0] step:9981/10000 train_time:408092ms step_avg:40.89ms
+[2025-09-05 19:12:40] [Rank 0] step:10000/10000 train_time:408791ms step_avg:40.88ms
+[2025-09-05 19:12:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:12:41] [Rank 0] PRINT: step:10000/10000 train_loss:1.6445 val_loss:1.6336 train_time:408914ms step_avg:40.89ms
+[2025-09-05 19:12:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:12:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:14:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:14:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:14:02] [Rank 0] Total Loss: 4.2340
+[2025-09-05 19:14:02] [Rank 0] Total FTA (Unweighted): 0.5062
+[2025-09-05 19:14:02] [Rank 0] Total FTA (Weighted): 0.5062
+[2025-09-05 19:14:02] [Rank 0] Group 0 Loss: 3.2156
+[2025-09-05 19:14:02] [Rank 0] Group 1 Loss: 3.1886
+[2025-09-05 19:14:02] [Rank 0] Group 2 Loss: 3.1459
+[2025-09-05 19:14:02] [Rank 0] Group 3 Loss: 3.4830
+[2025-09-05 19:14:02] [Rank 0] Group 4 Loss: 3.6642
+[2025-09-05 19:14:02] [Rank 0] Group 5 Loss: 3.9114
+[2025-09-05 19:14:02] [Rank 0] Group 6 Loss: 4.1636
+[2025-09-05 19:14:02] [Rank 0] Group 7 Loss: 4.3744
+[2025-09-05 19:14:02] [Rank 0] Group 8 Loss: 4.6317
+[2025-09-05 19:14:02] [Rank 0] Group 9 Loss: 4.7494
+[2025-09-05 19:14:02] [Rank 0] Group 10 Loss: 4.8801
+[2025-09-05 19:14:02] [Rank 0] Group 11 Loss: 4.8778
+[2025-09-05 19:14:02] [Rank 0] Group 12 Loss: 4.8341
+[2025-09-05 19:14:02] [Rank 0] Group 13 Loss: 4.8757
+[2025-09-05 19:14:02] [Rank 0] Group 14 Loss: 4.9111
+[2025-09-05 19:14:02] [Rank 0] Group 15 Loss: 4.8373
+[2025-09-05 19:14:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:14:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:14:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:14:02] [Rank 0] Group 3 FTA: 0.8900
+[2025-09-05 19:14:02] [Rank 0] Group 4 FTA: 0.5200
+[2025-09-05 19:14:02] [Rank 0] Group 5 FTA: 0.5200
+[2025-09-05 19:14:02] [Rank 0] Group 6 FTA: 0.4200
+[2025-09-05 19:14:02] [Rank 0] Group 7 FTA: 0.3600
+[2025-09-05 19:14:02] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 19:14:02] [Rank 0] Group 9 FTA: 0.3000
+[2025-09-05 19:14:02] [Rank 0] Group 10 FTA: 0.4000
+[2025-09-05 19:14:02] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 19:14:02] [Rank 0] Group 12 FTA: 0.3500
+[2025-09-05 19:14:02] [Rank 0] Group 13 FTA: 0.3300
+[2025-09-05 19:14:02] [Rank 0] Group 14 FTA: 0.2000
+[2025-09-05 19:14:02] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-05 19:14:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_loss_curves.png
+[2025-09-05 19:14:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/per_class_acc_curves.png
+[2025-09-05 19:14:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_loss_curve.png
+[2025-09-05 19:14:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_44/total_acc_curve.png
+[2025-09-05 19:14:03] [Rank 0] step:10001/10000 train_time:408924ms step_avg:40.89ms
+[2025-09-05 19:14:03] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 19:14:03 2025 ---
+[2025-09-05 19:14:03] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..01ea4aa36a38430f41ff1cc20b7a276bde791755
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/config.json
@@ -0,0 +1,29 @@
+{
+ "cli_args": {
+ "unet": false,
+ "seed": 
45, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.2, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "922a29e0-12c0-4264-96d7-8b1afe504dce", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..743c3ac0df575854717f5ffe532343ef7ca9c6ce --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b26748031b31e52729a819abaef7f020913d5bfe3ac160c9946b7d7441a01816 +size 403315 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..8ede76368e0f966bbb9cdf735e9d0d00d3a70d04 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3ef708cf37c7aaee811af293a76d627cf29a997adf81f27b6b4e980e63c5f02 +size 462157 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..551625eca0cbdeb7687e0c3f5e33bb209a2e968a --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:080fbc26eb401a3b7a7a9e4807bb82c4138594721cd08d590f89e38a02e8659d +size 95776 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..ead86c531e00d71ca359bc5a0f8025ee5e91eb0a --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3e2654075db3e55b80dec57f463161fd7f9bcd5a7a9676c40f6836774bb28b1 +size 122427 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/training_log_922a29e0-12c0-4264-96d7-8b1afe504dce.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/training_log_922a29e0-12c0-4264-96d7-8b1afe504dce.txt new file mode 100644 index 0000000000000000000000000000000000000000..642dc127ed87326e48890b3f05b9882711a54ecb --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/training_log_922a29e0-12c0-4264-96d7-8b1afe504dce.txt @@ -0,0 +1,5614 @@ +[2025-09-05 19:14:26] [Rank 0] PRINT: --- Script Start: Fri Sep 5 19:14:26 2025 --- +[2025-09-05 19:14:26] [Rank 0] PRINT: --- Script Start: Fri Sep 5 19:14:26 2025 --- +[2025-09-05 19:14:26] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 19:14:26] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 19:14:26] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 19:14:26] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 19:14:26] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-05 19:14:26] [Rank 0] PRINT: Using fixed seed: 45 +[2025-09-05 19:14:26] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45 +[2025-09-05 19:14:26] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45 +[2025-09-05 19:14:26] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
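+# A minimal sketch of a writer for the shard format that _load_data_shard
+# above expects (write_data_shard is a hypothetical helper, not part of this
+# run): a 256-slot int32 header carrying the magic number, version, and token
+# count, followed by the tokens as uint16. numpy is already imported as np.
+def write_data_shard(path, tokens):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520  # magic number asserted by _load_data_shard
+    header[1] = 1         # supported shard version
+    header[2] = len(tokens)
+    with open(path, "wb") as f:
+        f.write(header.tobytes())  # 256 * 4 header bytes, matching f.seek(256 * 4)
+        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())  # 2 bytes per token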
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
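+# Concrete values for this run: optimizer_mode 9 interpolates the SGD lr into
+# the folder name, so run_folder_name == "mode_9_param_gated_lr_0.2_seed_45"
+# and the run directory resolves to
+# logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45,
+# matching the "Run directory" line printed at the top of this log.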
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
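+# A quick worked example of generate_powerlaw_selection_counts (defined above),
+# assuming m=3: group 0 holds 1 class with 2**3 = 8 samples, and group g >= 1
+# holds 2**(g-1) classes with 2**(3-g) samples each, so
+#   selection_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#   class_groups     == [0, 1, 2, 2, 3, 3, 3, 3]
+# i.e. per-class sample counts fall off as a power law, and class_to_group_map
+# simply inverts this layout.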
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
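+# Two notes on the loop above and the aggregation just below. (1) FTA is a
+# single next-token check: with teacher forcing, logits[prompt_len - 1] is the
+# model's distribution over the token that follows the prompt, so a hit means
+#   torch.argmax(logits[prompt_len - 1]).item() == tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+# (2) The weighted and unweighted totals differ whenever groups are unbalanced.
+# Illustrative numbers: if group A has 100 samples at 90% FTA and group B has
+# 10 samples at 10% FTA, the sample-weighted total is (90 + 1) / 110 ~= 0.827
+# while the unweighted mean is (0.9 + 0.1) / 2 = 0.5; this run reports the
+# unweighted mean as 'total_acc', so tail groups weigh as much as head groups.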
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
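+# The mode dispatch above amounts to splitting the same matrix pools between
+# Muon and Adam (embeds, head, and scalars always stay with Adam). A compact,
+# hypothetical restatement for a few of the modes:
+def route_matrices(mode, qk, vo, mlp):
+    table = {
+        0: (qk + vo + mlp, []),   # Muon on all hidden matrices
+        1: (qk, vo + mlp),        # Muon(QK) / Adam(VO, MLP)
+        2: (vo, qk + mlp),        # Muon(VO) / Adam(QK, MLP)
+        5: ([], qk + vo + mlp),   # all-Adam baseline
+        7: (vo + mlp, qk),        # Muon(VO, MLP) / Adam(QK)
+    }
+    return table[mode]  # -> (muon_params, adam_extra_matrices)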
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
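+# Difference from the qkvo branch above: the gated MLP carries three weight
+# matrices per block (c_fc, c_up, c_proj), so here mlp_w1_group = c_fc + c_up,
+# mlp_w2_group = c_proj, and all three lists feed all_mlp_matrices; the mode
+# dispatch below is otherwise identical to the qkvo branch.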
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
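+# Worked numbers for get_lr above with this run's settings (num_iterations =
+# 10000, cooldown_frac = 0.8): the multiplier holds at 1.0 for the first 2000
+# steps, then decays linearly to 0.1:
+#   get_lr(0)     == 1.0
+#   get_lr(2000)  == 1.0    (x = 0.2, w = 1.0)
+#   get_lr(6000)  == 0.55   (x = 0.6, w = 0.5 -> 0.5 * 1.0 + 0.5 * 0.1)
+#   get_lr(10000) == 0.1    (end of cooldown)
+# For this mode-9 run that scales the SGD lr from 0.2 down to 0.02. The Muon
+# momentum warmup in the training loop below ramps linearly over the first 300
+# steps (0.85 at step 0, 0.90 at 150, 0.95 from step 300 on), though it is
+# inactive here because optimizer2 is None in mode 9.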
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
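+# The fixed eval set trades a little coverage for determinism: each group
+# contributes at most PER_GROUP_K (= 100 here) line indices, drawn once with a
+# dedicated rng (seed 2025) and reused at every eval step, so the per-class
+# curves move only when the model does. The sampling rule, restated with
+# buckets mapping group id -> candidate line indices and k = PER_GROUP_K:
+#   rng = random.Random(2025)
+#   fixed = {str(g): (arr[:] if len(arr) <= k else rng.sample(arr, k))
+#            for g, arr in buckets.items()}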
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
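+# Validation sizing for this config: val_batch_size = world_size * val_seq_len
+# and val_num_steps = val_tokens // val_batch_size, so a single-GPU run does
+# 491520 // 16384 == 30 validation steps with no remainder and the
+# divisibility warning above stays silent.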
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
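+# Shape of the history dict consumed by plot_curves (values illustrative):
+#   history['per_class_loss'] == {'0': {'500': 3.2, '1000': 2.9}, '1': {...}}
+#   history['total_acc']      == {'500': 0.12, '1000': 0.18}
+# plot_curves infers the layout: a dict of dicts is drawn as one viridis-
+# colored line per group, a flat dict as a single total curve.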
console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        # report the per-token loss computed above alongside the timing info
+        print0(f"step:{step+1}/{train_steps} train_loss:{train_loss_per_token:.4f} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 19:14:26] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # itertools.cycle, so multi-epoch training simply wraps around the shards
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
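+# Illustrative launch line (a sketch only: the script filename and process count
+# are assumptions, and the flag values are arbitrary examples, not this run's):
+#
+#   torchrun --nproc_per_node=1 <train_script>.py \
+#       --optimizer_mode 5 --model_parameterization gated --adam_lr 5e-4 --seed 42
+
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. 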
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank)  # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Append each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
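+    # First-Token Accuracy (FTA) in miniature: a sample counts as correct when the
+    # argmax over the logits at the last prompt position equals the first token of
+    # " <answer>". A hypothetical walk-through (names below are illustrative only):
+    #
+    #   text   = "Where was Alice born? Answer: Paris"
+    #   prompt = "Where was Alice born?";  answer = "Paris"
+    #   expected  = tokenizer.encode(" Paris", add_special_tokens=False)[0]
+    #   predicted = logits[len(prompt_tokens) - 1].argmax().item()
+    #   correct   = (predicted == expected)   # 0/1 per sample, averaged into per-group FTA
+
+    # 4. 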
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
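+    # A minimal sketch of what the mode partition above amounts to, with
+    # hypothetical stand-in names (W_q, W_k, W_v, W_o, W_fc, W_proj are shorthand
+    # for the groups collected earlier, not this model's real attributes):
+    #
+    #   qk_group = [W_q, W_k];  vo_group = [W_v, W_o]
+    #   w1_group = [W_fc];      w2_group = [W_proj]
+    #   mode 13 -> Muon: W_o, W_proj           (the output-side matrices)
+    #              Adam: W_q, W_k, W_v, W_fc   (plus head/embed/scalars, which always go to Adam)
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 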
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
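+    # Every parameter now belongs to exactly one optimizer; the schedule later
+    # rescales each group via group["lr"] = group["initial_lr"] * get_lr(step).
+    # Toy reproduction of the dedup-by-id() flattening used above (hypothetical
+    # tensors, safe to run standalone):
+    #
+    #   a = torch.nn.Parameter(torch.zeros(2, 2))
+    #   flat, seen = [], set()
+    #   for entry in [a, [a]]:                      # nested and duplicated on purpose
+    #       for p in (entry if isinstance(entry, list) else [entry]):
+    #           if id(p) not in seen:
+    #               flat.append(p); seen.add(id(p))
+    #   assert len(flat) == 1                       # the duplicate is dropped
+
+    print0(f"PRINT: Optimizers configured. 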
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
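+    # In this gated parameterization the W_1 group holds both input projections of
+    # the MLP (c_fc and c_up) and W_2 is the down projection (c_proj). A toy gated
+    # forward, assuming the common SwiGLU-style form (an assumption about
+    # models.nano_GPT_gated, shown only to make the W_1/W_2 naming concrete):
+    #
+    #   h = F.silu(x @ W_fc.T) * (x @ W_up.T)   # both factors sit in the W_1 group
+    #   y = h @ W_proj.T                        # W_proj alone forms the W_2 group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 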
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                muon_lr = exp_args.muon_lr  # the gated branch never set muon_lr above; define it before use
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
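+    # Optional sanity check (a sketch, not executed in this script): confirm the
+    # partition covers every model parameter exactly once across all optimizers.
+    #
+    #   ids = [id(p) for opt in optimizers for g in opt.param_groups for p in g["params"]]
+    #   assert len(ids) == len(set(ids))                         # no parameter assigned twice
+    #   assert set(ids) == {id(p) for p in model.parameters()}   # no parameter left out
+
+    print0(f"PRINT: Optimizers configured. 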
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / step if step > 0 else 0
+
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"    Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"    Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use the unweighted (simple) average
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
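A minimal sketch of how a checkpoint written by the loop above could be restored. The toy model is illustrative; the dict keys and the ckpt_epoch_{step}.pt naming match the code:

import torch
import torch.nn as nn

model = nn.Linear(8, 8)
opt = torch.optim.SGD(model.parameters(), lr=0.2, momentum=0.85)

ckpt = {
    "step": 500,
    "code": "<source snapshot>",        # the script stores its own source here
    "model": model.state_dict(),
    "optimizers": [opt.state_dict()],
}
torch.save(ckpt, "ckpt_epoch_500.pt")   # hypothetical path

state = torch.load("ckpt_epoch_500.pt")
model.load_state_dict(state["model"])
for o, s in zip([opt], state["optimizers"]):
    o.load_state_dict(s)
print("resumed at step", state["step"])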
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Gradient clipping for the pure-SGD mode (mode 9) to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)  # linear momentum warmup over the first 300 steps
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Note: computed for reference only; not included in the line printed below.
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
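The schedule logic above relies on each param group carrying an "initial_lr" snapshot, so the learning rate is rescaled from that fixed base every step rather than compounded. A compact sketch of the same pattern; get_lr here is a stand-in for the script's cooldown schedule (its exact shape is not shown in this section, only cooldown_frac=0.8 from the config), and in the script the momentum ramp applies only to optimizer2's groups:

import torch

params = [torch.nn.Parameter(torch.zeros(4))]
opt = torch.optim.SGD(params, lr=0.2, momentum=0.85)
for g in opt.param_groups:
    g["initial_lr"] = g["lr"]          # snapshot once, before training

def get_lr(step, total=10000, cooldown_frac=0.8):
    # Assumed shape: flat at 1.0, then linear decay over the final cooldown_frac of training.
    x = step / total
    return 1.0 if x < 1 - cooldown_frac else (1 - x) / cooldown_frac

for step in range(5):
    for g in opt.param_groups:
        g["lr"] = g["initial_lr"] * get_lr(step)          # rescale, never compound
        frac = min(step / 300, 1)
        g["momentum"] = (1 - frac) * 0.85 + frac * 0.95   # momentum warmup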
+[2025-09-05 19:14:26] [Rank 0] PRINT: Constructing model...
+[2025-09-05 19:14:27] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 19:14:27] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 19:14:27] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 19:14:31] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 19:14:31] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 19:14:31] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 19:14:31] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 19:14:32] [Rank 0] PRINT: Model returns:
+[2025-09-05 19:14:32] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 19:14:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 19:14:32] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.2).
+[2025-09-05 19:14:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 19:14:32] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 19:14:36] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 19:14:36] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 19:15:15] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 19:15:15] [Rank 0] PRINT: Starting training...
+[2025-09-05 19:15:21] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/fixed_eval_indices.json
+[2025-09-05 19:15:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:15:25] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 19:15:57] [Rank 0] step:21/10000 train_time:32393ms step_avg:1542.50ms
+[2025-09-05 19:15:58] [Rank 0] step:41/10000 train_time:33123ms step_avg:807.88ms
+[2025-09-05 19:15:59] [Rank 0] step:61/10000 train_time:33852ms step_avg:554.94ms
+[2025-09-05 19:16:00] [Rank 0] step:81/10000 train_time:34580ms step_avg:426.91ms
+[2025-09-05 19:16:00] [Rank 0] step:101/10000 train_time:35309ms step_avg:349.59ms
+[2025-09-05 19:16:01] [Rank 0] step:121/10000 train_time:36037ms step_avg:297.83ms
+[2025-09-05 19:16:02] [Rank 0] step:141/10000 train_time:36765ms step_avg:260.75ms
+[2025-09-05 19:16:03] [Rank 0] step:161/10000 train_time:37494ms step_avg:232.88ms
+[2025-09-05 19:16:03] [Rank 0] step:181/10000 train_time:38224ms step_avg:211.18ms
+[2025-09-05 19:16:04] [Rank 0] step:201/10000 train_time:38952ms step_avg:193.79ms
+[2025-09-05 19:16:05] [Rank 0] step:221/10000 train_time:39680ms step_avg:179.55ms
+[2025-09-05 19:16:06] [Rank 0] step:241/10000 train_time:40410ms step_avg:167.68ms
+[2025-09-05 19:16:06] [Rank 0] step:261/10000 train_time:41139ms step_avg:157.62ms
+[2025-09-05 19:16:07] [Rank 0] step:281/10000 train_time:41869ms step_avg:149.00ms
+[2025-09-05 19:16:08] [Rank 0] step:301/10000 train_time:42598ms step_avg:141.52ms
+[2025-09-05 19:16:08] [Rank 0] step:321/10000 train_time:43327ms step_avg:134.97ms
+[2025-09-05 19:16:09] [Rank 0] step:341/10000 train_time:44055ms step_avg:129.19ms
+[2025-09-05 19:16:10] [Rank 0] step:361/10000 train_time:44782ms step_avg:124.05ms
+[2025-09-05 19:16:11] [Rank 0] step:381/10000 train_time:45510ms step_avg:119.45ms
+[2025-09-05 19:16:11] [Rank 0] step:401/10000 train_time:46238ms step_avg:115.31ms
+[2025-09-05 19:16:12] [Rank 0] step:421/10000 train_time:46965ms step_avg:111.56ms
+[2025-09-05 19:16:13] [Rank 0] step:441/10000 train_time:47694ms step_avg:108.15ms
+[2025-09-05 19:16:14] [Rank 0] step:461/10000 train_time:48423ms step_avg:105.04ms
+[2025-09-05 19:16:14] [Rank 0] step:481/10000 train_time:49150ms step_avg:102.18ms
+[2025-09-05 19:16:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:16:15] [Rank 0] PRINT: step:500/10000 train_loss:4.6565 val_loss:3.2254 train_time:49958ms step_avg:99.92ms
+[2025-09-05 19:16:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:16:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:17:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:17:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:17:36] [Rank 0] Total Loss: 5.5784
+[2025-09-05 19:17:36] [Rank 0] Total FTA (Unweighted): 0.1263
+[2025-09-05 19:17:36] [Rank 0] Total FTA (Weighted): 0.1263
+[2025-09-05 19:17:36] [Rank 0] Group 0 Loss: 3.5822
+[2025-09-05 19:17:36] [Rank 0] Group 1 Loss: 3.4738
+[2025-09-05 19:17:36] [Rank 0] Group 2 Loss: 3.7870
+[2025-09-05 19:17:36] [Rank 0] Group 3 Loss: 4.4390
+[2025-09-05 19:17:36] [Rank 0] Group 4 Loss: 5.3483
+[2025-09-05 19:17:36] [Rank 0] Group 5 Loss: 5.7681
+[2025-09-05 19:17:36] [Rank 0] Group 6 Loss: 6.0125
+[2025-09-05 19:17:36] [Rank 0] Group 7 Loss: 6.0557
+[2025-09-05 19:17:36] [Rank 0] Group 8 Loss: 6.2365
+[2025-09-05 19:17:36] [Rank 0] Group 9 Loss: 6.3957
+[2025-09-05 19:17:36] [Rank 0] Group 10 Loss: 6.4138
+[2025-09-05 19:17:36] [Rank 0] Group 11 Loss: 6.4805
+[2025-09-05 19:17:36] [Rank 0] Group 12 Loss: 6.2766
+[2025-09-05 19:17:36] [Rank 0] Group 13 Loss: 6.2965
+[2025-09-05 19:17:36] [Rank 0] Group 14 Loss: 6.3942
+[2025-09-05 19:17:36] [Rank 0] Group 15 Loss: 6.2945
+[2025-09-05 19:17:36] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 19:17:36] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 19:17:36] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 19:17:36] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 19:17:36] [Rank 0] Group 4 FTA: 0.1300
+[2025-09-05 19:17:36] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-05 19:17:36] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 19:17:36] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 19:17:36] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 19:17:36] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-05 19:17:36] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 19:17:36] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 19:17:36] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 19:17:36] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 19:17:36] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 19:17:36] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:17:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:17:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:17:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:17:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:17:38] [Rank 0] step:501/10000 train_time:49968ms step_avg:99.74ms
+[2025-09-05 19:17:39] [Rank 0] step:521/10000 train_time:50641ms step_avg:97.20ms
+[2025-09-05 19:17:40] [Rank 0] step:541/10000 train_time:51369ms step_avg:94.95ms
+[2025-09-05 19:17:40] [Rank 0] step:561/10000 train_time:52098ms step_avg:92.87ms
+[2025-09-05 19:17:41] [Rank 0] step:581/10000 train_time:52826ms step_avg:90.92ms
+[2025-09-05 19:17:42] [Rank 0] step:601/10000 train_time:53553ms step_avg:89.11ms
+[2025-09-05 19:17:43] [Rank 0] step:621/10000 train_time:54281ms step_avg:87.41ms
+[2025-09-05 19:17:43] [Rank 0] step:641/10000 train_time:55010ms step_avg:85.82ms
+[2025-09-05 19:17:44] [Rank 0] step:661/10000 train_time:55739ms step_avg:84.32ms
+[2025-09-05 19:17:45] [Rank 0] step:681/10000 train_time:56466ms step_avg:82.92ms
+[2025-09-05 19:17:46] [Rank 0] step:701/10000 train_time:57194ms step_avg:81.59ms
+[2025-09-05 19:17:46] [Rank 0] step:721/10000 train_time:57922ms step_avg:80.34ms
+[2025-09-05 19:17:47] [Rank 0] step:741/10000 train_time:58651ms step_avg:79.15ms
+[2025-09-05 19:17:48] [Rank 0] step:761/10000 train_time:59384ms step_avg:78.03ms
+[2025-09-05 19:17:48] [Rank 0] step:781/10000 train_time:60118ms step_avg:76.98ms
+[2025-09-05 19:17:49] [Rank 0] step:801/10000 train_time:60851ms step_avg:75.97ms
+[2025-09-05 19:17:51] [Rank 0] step:821/10000 train_time:62192ms step_avg:75.75ms
+[2025-09-05 19:17:51] [Rank 0] step:841/10000 train_time:62926ms step_avg:74.82ms
+[2025-09-05 19:17:52] [Rank 0] step:861/10000 train_time:63660ms step_avg:73.94ms
+[2025-09-05 19:17:53] [Rank 0] step:881/10000 train_time:64393ms step_avg:73.09ms
+[2025-09-05 19:17:53] [Rank 0] step:901/10000 train_time:65127ms step_avg:72.28ms
+[2025-09-05 19:17:54] [Rank 0] step:921/10000 train_time:65861ms step_avg:71.51ms
+[2025-09-05 19:17:55] [Rank 0] step:941/10000 train_time:66594ms step_avg:70.77ms
+[2025-09-05 19:17:56] [Rank 0] step:961/10000 train_time:67328ms step_avg:70.06ms
+[2025-09-05 19:17:56] [Rank 0] step:981/10000 train_time:68062ms step_avg:69.38ms
+[2025-09-05 19:17:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:17:58] [Rank 0] PRINT: step:1000/10000 train_loss:2.8909 val_loss:2.6133 train_time:68876ms step_avg:68.88ms
+[2025-09-05 19:17:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:17:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:19:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:19:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:19:18] [Rank 0] Total Loss: 5.1902
+[2025-09-05 19:19:18] [Rank 0] Total FTA (Unweighted): 0.1819
+[2025-09-05 19:19:18] [Rank 0] Total FTA (Weighted): 0.1819
+[2025-09-05 19:19:18] [Rank 0] Group 0 Loss: 3.6116
+[2025-09-05 19:19:18] [Rank 0] Group 1 Loss: 3.4734
+[2025-09-05 19:19:18] [Rank 0] Group 2 Loss: 3.5735
+[2025-09-05 19:19:18] [Rank 0] Group 3 Loss: 4.0518
+[2025-09-05 19:19:18] [Rank 0] Group 4 Loss: 4.5519
+[2025-09-05 19:19:18] [Rank 0] Group 5 Loss: 5.1097
+[2025-09-05 19:19:18] [Rank 0] Group 6 Loss: 5.4175
+[2025-09-05 19:19:18] [Rank 0] Group 7 Loss: 5.5481
+[2025-09-05 19:19:18] [Rank 0] Group 8 Loss: 5.8337
+[2025-09-05 19:19:18] [Rank 0] Group 9 Loss: 5.9596
+[2025-09-05 19:19:18] [Rank 0] Group 10 Loss: 6.0127
+[2025-09-05 19:19:18] [Rank 0] Group 11 Loss: 6.0568
+[2025-09-05 19:19:18] [Rank 0] Group 12 Loss: 5.9379
+[2025-09-05 19:19:18] [Rank 0] Group 13 Loss: 5.9418
+[2025-09-05 19:19:18] [Rank 0] Group 14 Loss: 6.0169
+[2025-09-05 19:19:18] [Rank 0] Group 15 Loss: 5.9460
+[2025-09-05 19:19:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:19:18] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 19:19:18] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 19:19:18] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 19:19:18] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-05 19:19:18] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 19:19:18] [Rank 0] Group 6 FTA: 0.1000
+[2025-09-05 19:19:18] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 19:19:18] [Rank 0] Group 8 FTA: 0.1700
+[2025-09-05 19:19:18] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 19:19:18] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-05 19:19:18] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 19:19:18] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 19:19:18] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 19:19:18] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 19:19:18] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:19:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:19:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:19:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:19:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:19:20] [Rank 0] step:1001/10000 train_time:68886ms step_avg:68.82ms
+[2025-09-05 19:19:21] [Rank 0] step:1021/10000 train_time:69556ms step_avg:68.13ms
+[2025-09-05 19:19:22] [Rank 0] step:1041/10000 train_time:70289ms step_avg:67.52ms
+[2025-09-05 19:19:23] [Rank 0] step:1061/10000 train_time:71023ms step_avg:66.94ms
+[2025-09-05 19:19:23] [Rank 0] step:1081/10000 train_time:71757ms step_avg:66.38ms
+[2025-09-05 19:19:24] [Rank 0] step:1101/10000 train_time:72490ms step_avg:65.84ms
+[2025-09-05 19:19:25] [Rank 0] step:1121/10000 train_time:73225ms step_avg:65.32ms
+[2025-09-05 19:19:26] [Rank 0] step:1141/10000 train_time:73958ms step_avg:64.82ms
+[2025-09-05 19:19:26] [Rank 0] step:1161/10000 train_time:74691ms step_avg:64.33ms
+[2025-09-05 19:19:27] [Rank 0] step:1181/10000 train_time:75425ms step_avg:63.87ms
+[2025-09-05 19:19:28] [Rank 0] step:1201/10000 train_time:76159ms step_avg:63.41ms
+[2025-09-05 19:19:28] [Rank 0] step:1221/10000 train_time:76892ms step_avg:62.97ms
+[2025-09-05 19:19:29] [Rank 0] step:1241/10000 train_time:77625ms step_avg:62.55ms
+[2025-09-05 19:19:30] [Rank 0] step:1261/10000 train_time:78359ms step_avg:62.14ms
+[2025-09-05 19:19:31] [Rank 0] step:1281/10000 train_time:79093ms step_avg:61.74ms
+[2025-09-05 19:19:31] [Rank 0] step:1301/10000 train_time:79827ms step_avg:61.36ms
+[2025-09-05 19:19:32] [Rank 0] step:1321/10000 train_time:80560ms step_avg:60.98ms
+[2025-09-05 19:19:33] [Rank 0] step:1341/10000 train_time:81294ms step_avg:60.62ms
+[2025-09-05 19:19:34] [Rank 0] step:1361/10000 train_time:82173ms step_avg:60.38ms
+[2025-09-05 19:19:34] [Rank 0] step:1381/10000 train_time:82906ms step_avg:60.03ms
+[2025-09-05 19:19:35] [Rank 0] step:1401/10000 train_time:83640ms step_avg:59.70ms
+[2025-09-05 19:19:36] [Rank 0] step:1421/10000 train_time:84554ms step_avg:59.50ms
+[2025-09-05 19:19:37] [Rank 0] step:1441/10000 train_time:85288ms step_avg:59.19ms
+[2025-09-05 19:19:38] [Rank 0] step:1461/10000 train_time:86022ms step_avg:58.88ms
+[2025-09-05 19:19:38] [Rank 0] step:1481/10000 train_time:86755ms step_avg:58.58ms
+[2025-09-05 19:19:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:19:40] [Rank 0] PRINT: step:1500/10000 train_loss:2.4458 val_loss:2.2942 train_time:87568ms step_avg:58.38ms
+[2025-09-05 19:19:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:19:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:21:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:21:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:21:01] [Rank 0] Total Loss: 4.9684
+[2025-09-05 19:21:01] [Rank 0] Total FTA (Unweighted): 0.2650
+[2025-09-05 19:21:01] [Rank 0] Total FTA (Weighted): 0.2650
+[2025-09-05 19:21:01] [Rank 0] Group 0 Loss: 3.5070
+[2025-09-05 19:21:01] [Rank 0] Group 1 Loss: 3.5215
+[2025-09-05 19:21:01] [Rank 0] Group 2 Loss: 3.4845
+[2025-09-05 19:21:01] [Rank 0] Group 3 Loss: 3.9029
+[2025-09-05 19:21:01] [Rank 0] Group 4 Loss: 4.2647
+[2025-09-05 19:21:01] [Rank 0] Group 5 Loss: 4.7473
+[2025-09-05 19:21:01] [Rank 0] Group 6 Loss: 5.1141
+[2025-09-05 19:21:01] [Rank 0] Group 7 Loss: 5.2455
+[2025-09-05 19:21:01] [Rank 0] Group 8 Loss: 5.5512
+[2025-09-05 19:21:01] [Rank 0] Group 9 Loss: 5.6627
+[2025-09-05 19:21:01] [Rank 0] Group 10 Loss: 5.7634
+[2025-09-05 19:21:01] [Rank 0] Group 11 Loss: 5.8193
+[2025-09-05 19:21:01] [Rank 0] Group 12 Loss: 5.6805
+[2025-09-05 19:21:01] [Rank 0] Group 13 Loss: 5.7387
+[2025-09-05 19:21:01] [Rank 0] Group 14 Loss: 5.7771
+[2025-09-05 19:21:01] [Rank 0] Group 15 Loss: 5.7146
+[2025-09-05 19:21:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:21:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:21:01] [Rank 0] Group 2 FTA: 0.4000
+[2025-09-05 19:21:01] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 19:21:01] [Rank 0] Group 4 FTA: 0.2200
+[2025-09-05 19:21:01] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 19:21:01] [Rank 0] Group 6 FTA: 0.1800
+[2025-09-05 19:21:01] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 19:21:01] [Rank 0] Group 8 FTA: 0.2100
+[2025-09-05 19:21:01] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 19:21:01] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 19:21:01] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 19:21:01] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 19:21:01] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 19:21:01] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 19:21:01] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 19:21:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:21:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:21:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:21:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:21:02] [Rank 0] step:1501/10000 train_time:87578ms step_avg:58.35ms
+[2025-09-05 19:21:03] [Rank 0] step:1521/10000 train_time:88253ms step_avg:58.02ms
+[2025-09-05 19:21:04] [Rank 0] step:1541/10000 train_time:88986ms step_avg:57.75ms
+[2025-09-05 19:21:05] [Rank 0] step:1561/10000 train_time:89720ms step_avg:57.48ms
+[2025-09-05 19:21:05] [Rank 0] step:1581/10000 train_time:90453ms step_avg:57.21ms
+[2025-09-05 19:21:06] [Rank 0] step:1601/10000 train_time:91186ms step_avg:56.96ms
+[2025-09-05 19:21:07] [Rank 0] step:1621/10000 train_time:91920ms step_avg:56.71ms
+[2025-09-05 19:21:08] [Rank 0] step:1641/10000 train_time:93276ms step_avg:56.84ms
+[2025-09-05 19:21:09] [Rank 0] step:1661/10000 train_time:94010ms step_avg:56.60ms
+[2025-09-05 19:21:10] [Rank 0] step:1681/10000 train_time:94743ms step_avg:56.36ms
+[2025-09-05 19:21:10] [Rank 0] step:1701/10000 train_time:95476ms step_avg:56.13ms
+[2025-09-05 19:21:11] [Rank 0] step:1721/10000 train_time:96209ms step_avg:55.90ms
+[2025-09-05 19:21:12] [Rank 0] step:1741/10000 train_time:96942ms step_avg:55.68ms
+[2025-09-05 19:21:13] [Rank 0] step:1761/10000 train_time:97674ms step_avg:55.47ms
+[2025-09-05 19:21:13] [Rank 0] step:1781/10000 train_time:98407ms step_avg:55.25ms
+[2025-09-05 19:21:14] [Rank 0] step:1801/10000 train_time:99140ms step_avg:55.05ms
+[2025-09-05 19:21:15] [Rank 0] step:1821/10000 train_time:99873ms step_avg:54.85ms
+[2025-09-05 19:21:16] [Rank 0] step:1841/10000 train_time:100606ms step_avg:54.65ms
+[2025-09-05 19:21:16] [Rank 0] step:1861/10000 train_time:101340ms step_avg:54.45ms
+[2025-09-05 19:21:17] [Rank 0] step:1881/10000 train_time:102073ms step_avg:54.27ms
+[2025-09-05 19:21:18] [Rank 0] step:1901/10000 train_time:102807ms step_avg:54.08ms
+[2025-09-05 19:21:18] [Rank 0] step:1921/10000 train_time:103541ms step_avg:53.90ms
+[2025-09-05 19:21:19] [Rank 0] step:1941/10000 train_time:104273ms step_avg:53.72ms
+[2025-09-05 19:21:20] [Rank 0] step:1961/10000 train_time:105007ms step_avg:53.55ms
+[2025-09-05 19:21:21] [Rank 0] step:1981/10000 train_time:105740ms step_avg:53.38ms
+[2025-09-05 19:21:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:21:22] [Rank 0] PRINT: step:2000/10000 train_loss:2.2067 val_loss:2.1100 train_time:106553ms step_avg:53.28ms
+[2025-09-05 19:21:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:21:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:22:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:22:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:22:44] [Rank 0] Total Loss: 4.8126
+[2025-09-05 19:22:44] [Rank 0] Total FTA (Unweighted): 0.3006
+[2025-09-05 19:22:44] [Rank 0] Total FTA (Weighted): 0.3006
+[2025-09-05 19:22:44] [Rank 0] Group 0 Loss: 3.4425
+[2025-09-05 19:22:44] [Rank 0] Group 1 Loss: 3.4293
+[2025-09-05 19:22:44] [Rank 0] Group 2 Loss: 3.4227
+[2025-09-05 19:22:44] [Rank 0] Group 3 Loss: 3.8227
+[2025-09-05 19:22:44] [Rank 0] Group 4 Loss: 4.1772
+[2025-09-05 19:22:44] [Rank 0] Group 5 Loss: 4.5454
+[2025-09-05 19:22:44] [Rank 0] Group 6 Loss: 4.8532
+[2025-09-05 19:22:44] [Rank 0] Group 7 Loss: 5.0355
+[2025-09-05 19:22:44] [Rank 0] Group 8 Loss: 5.3642
+[2025-09-05 19:22:44] [Rank 0] Group 9 Loss: 5.4708
+[2025-09-05 19:22:44] [Rank 0] Group 10 Loss: 5.5773
+[2025-09-05 19:22:44] [Rank 0] Group 11 Loss: 5.6126
+[2025-09-05 19:22:44] [Rank 0] Group 12 Loss: 5.5171
+[2025-09-05 19:22:44] [Rank 0] Group 13 Loss: 5.5697
+[2025-09-05 19:22:44] [Rank 0] Group 14 Loss: 5.5928
+[2025-09-05 19:22:44] [Rank 0] Group 15 Loss: 5.5692
+[2025-09-05 19:22:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:22:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:22:44] [Rank 0] Group 2 FTA: 0.7000
+[2025-09-05 19:22:44] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 19:22:44] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 19:22:44] [Rank 0] Group 5 FTA: 0.2300
+[2025-09-05 19:22:44] [Rank 0] Group 6 FTA: 0.2800
+[2025-09-05 19:22:44] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 19:22:44] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 19:22:44] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 19:22:44] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-05 19:22:44] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 19:22:44] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 19:22:44] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 19:22:44] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 19:22:44] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:22:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:22:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:22:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:22:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:22:45] [Rank 0] step:2001/10000 train_time:106563ms step_avg:53.26ms
+[2025-09-05 19:22:46] [Rank 0] step:2021/10000 train_time:107434ms step_avg:53.16ms
+[2025-09-05 19:22:47] [Rank 0] step:2041/10000 train_time:108166ms step_avg:53.00ms
+[2025-09-05 19:22:48] [Rank 0] step:2061/10000 train_time:108898ms step_avg:52.84ms
+[2025-09-05 19:22:49] [Rank 0] step:2081/10000 train_time:109630ms step_avg:52.68ms
+[2025-09-05 19:22:49] [Rank 0] step:2101/10000 train_time:110364ms step_avg:52.53ms
+[2025-09-05 19:22:50] [Rank 0] step:2121/10000 train_time:111097ms step_avg:52.38ms
+[2025-09-05 19:22:51] [Rank 0] step:2141/10000 train_time:111830ms step_avg:52.23ms
+[2025-09-05 19:22:51] [Rank 0] step:2161/10000 train_time:112562ms step_avg:52.09ms
+[2025-09-05 19:22:52] [Rank 0] step:2181/10000 train_time:113295ms step_avg:51.95ms
+[2025-09-05 19:22:53] [Rank 0] step:2201/10000 train_time:114028ms step_avg:51.81ms
+[2025-09-05 19:22:54] [Rank 0] step:2221/10000 train_time:114761ms step_avg:51.67ms
+[2025-09-05 19:22:54] [Rank 0] step:2241/10000 train_time:115498ms step_avg:51.54ms
+[2025-09-05 19:22:55] [Rank 0] step:2261/10000 train_time:116237ms step_avg:51.41ms
+[2025-09-05 19:22:56] [Rank 0] step:2281/10000 train_time:116976ms step_avg:51.28ms
+[2025-09-05 19:22:57] [Rank 0] step:2301/10000 train_time:117715ms step_avg:51.16ms
+[2025-09-05 19:22:57] [Rank 0] step:2321/10000 train_time:118455ms step_avg:51.04ms
+[2025-09-05 19:22:58] [Rank 0] step:2341/10000 train_time:119194ms step_avg:50.92ms
+[2025-09-05 19:22:59] [Rank 0] step:2361/10000 train_time:119934ms step_avg:50.80ms
+[2025-09-05 19:23:00] [Rank 0] step:2381/10000 train_time:120674ms step_avg:50.68ms
+[2025-09-05 19:23:00] [Rank 0] step:2401/10000 train_time:121413ms step_avg:50.57ms
+[2025-09-05 19:23:01] [Rank 0] step:2421/10000 train_time:122152ms step_avg:50.46ms
+[2025-09-05 19:23:02] [Rank 0] step:2441/10000 train_time:122892ms step_avg:50.35ms
+[2025-09-05 19:23:03] [Rank 0] step:2461/10000 train_time:123633ms step_avg:50.24ms
+[2025-09-05 19:23:03] [Rank 0] step:2481/10000 train_time:124373ms step_avg:50.13ms
+[2025-09-05 19:23:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:23:04] [Rank 0] PRINT: step:2500/10000 train_loss:2.0581 val_loss:1.9853 train_time:125194ms step_avg:50.08ms
+[2025-09-05 19:23:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:23:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:24:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:24:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:24:26] [Rank 0] Total Loss: 4.7338
+[2025-09-05 19:24:26] [Rank 0] Total FTA (Unweighted): 0.3194
+[2025-09-05 19:24:26] [Rank 0] Total FTA (Weighted): 0.3194
+[2025-09-05 19:24:26] [Rank 0] Group 0 Loss: 3.5228
+[2025-09-05 19:24:26] [Rank 0] Group 1 Loss: 3.4741
+[2025-09-05 19:24:26] [Rank 0] Group 2 Loss: 3.3990
+[2025-09-05 19:24:26] [Rank 0] Group 3 Loss: 3.8240
+[2025-09-05 19:24:26] [Rank 0] Group 4 Loss: 4.0783
+[2025-09-05 19:24:26] [Rank 0] Group 5 Loss: 4.4202
+[2025-09-05 19:24:26] [Rank 0] Group 6 Loss: 4.7216
+[2025-09-05 19:24:26] [Rank 0] Group 7 Loss: 4.9198
+[2025-09-05 19:24:26] [Rank 0] Group 8 Loss: 5.2596
+[2025-09-05 19:24:26] [Rank 0] Group 9 Loss: 5.3480
+[2025-09-05 19:24:26] [Rank 0] Group 10 Loss: 5.4771
+[2025-09-05 19:24:26] [Rank 0] Group 11 Loss: 5.4897
+[2025-09-05 19:24:26] [Rank 0] Group 12 Loss: 5.4080
+[2025-09-05 19:24:26] [Rank 0] Group 13 Loss: 5.4442
+[2025-09-05 19:24:26] [Rank 0] Group 14 Loss: 5.4978
+[2025-09-05 19:24:26] [Rank 0] Group 15 Loss: 5.4574
+[2025-09-05 19:24:26] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:24:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:24:26] [Rank 0] Group 2 FTA: 0.8800
+[2025-09-05 19:24:26] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 19:24:26] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 19:24:26] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 19:24:26] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 19:24:26] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 19:24:26] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 19:24:26] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 19:24:26] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 19:24:26] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 19:24:26] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 19:24:26] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 19:24:26] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 19:24:26] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:24:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:24:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:24:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:24:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:24:28] [Rank 0] step:2501/10000 train_time:125204ms step_avg:50.06ms
+[2025-09-05 19:24:29] [Rank 0] step:2521/10000 train_time:125883ms step_avg:49.93ms
+[2025-09-05 19:24:29] [Rank 0] step:2541/10000 train_time:126623ms step_avg:49.83ms
+[2025-09-05 19:24:30] [Rank 0] step:2561/10000 train_time:127362ms step_avg:49.73ms
+[2025-09-05 19:24:31] [Rank 0] step:2581/10000 train_time:128102ms step_avg:49.63ms
+[2025-09-05 19:24:32] [Rank 0] step:2601/10000 train_time:128842ms step_avg:49.54ms
+[2025-09-05 19:24:32] [Rank 0] step:2621/10000 train_time:129582ms step_avg:49.44ms
+[2025-09-05 19:24:33] [Rank 0] step:2641/10000 train_time:130321ms step_avg:49.35ms
+[2025-09-05 19:24:34] [Rank 0] step:2661/10000 train_time:131061ms step_avg:49.25ms
+[2025-09-05 19:24:35] [Rank 0] step:2681/10000 train_time:131801ms step_avg:49.16ms
+[2025-09-05 19:24:35] [Rank 0] step:2701/10000 train_time:132541ms step_avg:49.07ms
+[2025-09-05 19:24:36] [Rank 0] step:2721/10000 train_time:133281ms step_avg:48.98ms
+[2025-09-05 19:24:37] [Rank 0] step:2741/10000 train_time:134021ms step_avg:48.89ms
+[2025-09-05 19:24:38] [Rank 0] step:2761/10000 train_time:134761ms step_avg:48.81ms
+[2025-09-05 19:24:38] [Rank 0] step:2781/10000 train_time:135500ms step_avg:48.72ms
+[2025-09-05 19:24:39] [Rank 0] step:2801/10000 train_time:136239ms step_avg:48.64ms
+[2025-09-05 19:24:40] [Rank 0] step:2821/10000 train_time:137586ms step_avg:48.77ms
+[2025-09-05 19:24:41] [Rank 0] step:2841/10000 train_time:138325ms step_avg:48.69ms
+[2025-09-05 19:24:42] [Rank 0] step:2861/10000 train_time:139065ms step_avg:48.61ms
+[2025-09-05 19:24:43] [Rank 0] step:2881/10000 train_time:139810ms step_avg:48.53ms
+[2025-09-05 19:24:43] [Rank 0] step:2901/10000 train_time:140551ms step_avg:48.45ms
+[2025-09-05 19:24:44] [Rank 0] step:2921/10000 train_time:141290ms step_avg:48.37ms
+[2025-09-05 19:24:45] [Rank 0] step:2941/10000 train_time:142031ms step_avg:48.29ms
+[2025-09-05 19:24:46] [Rank 0] step:2961/10000 train_time:142770ms step_avg:48.22ms
+[2025-09-05 19:24:46] [Rank 0] step:2981/10000 train_time:143509ms step_avg:48.14ms
+[2025-09-05 19:24:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:24:47] [Rank 0] PRINT: step:3000/10000 train_loss:1.9472 val_loss:1.8969 train_time:144330ms step_avg:48.11ms
+[2025-09-05 19:24:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:24:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:26:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:26:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:26:09] [Rank 0] Total Loss: 4.5774
+[2025-09-05 19:26:09] [Rank 0] Total FTA (Unweighted): 0.3525
+[2025-09-05 19:26:09] [Rank 0] Total FTA (Weighted): 0.3525
+[2025-09-05 19:26:09] [Rank 0] Group 0 Loss: 3.4033
+[2025-09-05 19:26:09] [Rank 0] Group 1 Loss: 3.3854
+[2025-09-05 19:26:09] [Rank 0] Group 2 Loss: 3.2956
+[2025-09-05 19:26:09] [Rank 0] Group 3 Loss: 3.6742
+[2025-09-05 19:26:09] [Rank 0] Group 4 Loss: 3.9465
+[2025-09-05 19:26:09] [Rank 0] Group 5 Loss: 4.2481
+[2025-09-05 19:26:09] [Rank 0] Group 6 Loss: 4.5576
+[2025-09-05 19:26:09] [Rank 0] Group 7 Loss: 4.7499
+[2025-09-05 19:26:09] [Rank 0] Group 8 Loss: 5.0739
+[2025-09-05 19:26:10] [Rank 0] Group 9 Loss: 5.1606
+[2025-09-05 19:26:10] [Rank 0] Group 10 Loss: 5.2807
+[2025-09-05 19:26:10] [Rank 0] Group 11 Loss: 5.3035
+[2025-09-05 19:26:10] [Rank 0] Group 12 Loss: 5.2221
+[2025-09-05 19:26:10] [Rank 0] Group 13 Loss: 5.2961
+[2025-09-05 19:26:10] [Rank 0] Group 14 Loss: 5.3162
+[2025-09-05 19:26:10] [Rank 0] Group 15 Loss: 5.3247
+[2025-09-05 19:26:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:26:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:26:10] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:26:10] [Rank 0] Group 3 FTA: 0.3100
+[2025-09-05 19:26:10] [Rank 0] Group 4 FTA: 0.3200
+[2025-09-05 19:26:10] [Rank 0] Group 5 FTA: 0.3400
+[2025-09-05 19:26:10] [Rank 0] Group 6 FTA: 0.3100
+[2025-09-05 19:26:10] [Rank 0] Group 7 FTA: 0.1700
+[2025-09-05 19:26:10] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 19:26:10] [Rank 0] Group 9 FTA: 0.1500
+[2025-09-05 19:26:10] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 19:26:10] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 19:26:10] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 19:26:10] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 19:26:10] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 19:26:10] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:26:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:26:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:26:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:26:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:26:11] [Rank 0] step:3001/10000 train_time:144340ms step_avg:48.10ms
+[2025-09-05 19:26:12] [Rank 0] step:3021/10000 train_time:145020ms step_avg:48.00ms
+[2025-09-05 19:26:13] [Rank 0] step:3041/10000 train_time:145760ms step_avg:47.93ms
+[2025-09-05 19:26:13] [Rank 0] step:3061/10000 train_time:146500ms step_avg:47.86ms
+[2025-09-05 19:26:14] [Rank 0] step:3081/10000 train_time:147240ms step_avg:47.79ms
+[2025-09-05 19:26:15] [Rank 0] step:3101/10000 train_time:147980ms step_avg:47.72ms
+[2025-09-05 19:26:15] [Rank 0] step:3121/10000 train_time:148720ms step_avg:47.65ms
+[2025-09-05 19:26:16] [Rank 0] step:3141/10000 train_time:149463ms step_avg:47.58ms
+[2025-09-05 19:26:17] [Rank 0] step:3161/10000 train_time:150203ms step_avg:47.52ms
+[2025-09-05 19:26:18] [Rank 0] step:3181/10000 train_time:150943ms step_avg:47.45ms
+[2025-09-05 19:26:18] [Rank 0] step:3201/10000 train_time:151683ms step_avg:47.39ms
+[2025-09-05 19:26:19] [Rank 0] step:3221/10000 train_time:152427ms step_avg:47.32ms
+[2025-09-05 19:26:20] [Rank 0] step:3241/10000 train_time:153167ms step_avg:47.26ms
+[2025-09-05 19:26:21] [Rank 0] step:3261/10000 train_time:153907ms step_avg:47.20ms
+[2025-09-05 19:26:21] [Rank 0] step:3281/10000 train_time:154648ms step_avg:47.13ms
+[2025-09-05 19:26:22] [Rank 0] step:3301/10000 train_time:155387ms step_avg:47.07ms
+[2025-09-05 19:26:23] [Rank 0] step:3321/10000 train_time:156127ms step_avg:47.01ms
+[2025-09-05 19:26:24] [Rank 0] step:3341/10000 train_time:156867ms step_avg:46.95ms
+[2025-09-05 19:26:24] [Rank 0] step:3361/10000 train_time:157608ms step_avg:46.89ms
+[2025-09-05 19:26:25] [Rank 0] step:3381/10000 train_time:158348ms step_avg:46.83ms
+[2025-09-05 19:26:26] [Rank 0] step:3401/10000 train_time:159088ms step_avg:46.78ms
+[2025-09-05 19:26:27] [Rank 0] step:3421/10000 train_time:159827ms step_avg:46.72ms
+[2025-09-05 19:26:27] [Rank 0] step:3441/10000 train_time:160568ms step_avg:46.66ms
+[2025-09-05 19:26:28] [Rank 0] step:3461/10000 train_time:161307ms step_avg:46.61ms
+[2025-09-05 19:26:29] [Rank 0] step:3481/10000 train_time:162047ms step_avg:46.55ms
+[2025-09-05 19:26:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:26:30] [Rank 0] PRINT: step:3500/10000 train_loss:1.8732 val_loss:1.8340 train_time:162867ms step_avg:46.53ms
+[2025-09-05 19:26:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:26:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:27:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:27:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:27:51] [Rank 0] Total Loss: 4.4567
+[2025-09-05 19:27:51] [Rank 0] Total FTA (Unweighted): 0.3713
+[2025-09-05 19:27:51] [Rank 0] Total FTA (Weighted): 0.3713
+[2025-09-05 19:27:51] [Rank 0] Group 0 Loss: 3.3442
+[2025-09-05 19:27:51] [Rank 0] Group 1 Loss: 3.2691
+[2025-09-05 19:27:51] [Rank 0] Group 2 Loss: 3.2370
+[2025-09-05 19:27:51] [Rank 0] Group 3 Loss: 3.6262
+[2025-09-05 19:27:51] [Rank 0] Group 4 Loss: 3.8181
+[2025-09-05 19:27:51] [Rank 0] Group 5 Loss: 4.1229
+[2025-09-05 19:27:51] [Rank 0] Group 6 Loss: 4.4081
+[2025-09-05 19:27:51] [Rank 0] Group 7 Loss: 4.6219
+[2025-09-05 19:27:51] [Rank 0] Group 8 Loss: 4.9090
+[2025-09-05 19:27:51] [Rank 0] Group 9 Loss: 5.0076
+[2025-09-05 19:27:51] [Rank 0] Group 10 Loss: 5.1180
+[2025-09-05 19:27:51] [Rank 0] Group 11 Loss: 5.1646
+[2025-09-05 19:27:51] [Rank 0] Group 12 Loss: 5.1155
+[2025-09-05 19:27:51] [Rank 0] Group 13 Loss: 5.1804
+[2025-09-05 19:27:51] [Rank 0] Group 14 Loss: 5.1873
+[2025-09-05 19:27:51] [Rank 0] Group 15 Loss: 5.1780
+[2025-09-05 19:27:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:27:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:27:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:27:51] [Rank 0] Group 3 FTA: 0.4500
+[2025-09-05 19:27:51] [Rank 0] Group 4 FTA: 0.3400
+[2025-09-05 19:27:51] [Rank 0] Group 5 FTA: 0.3400
+[2025-09-05 19:27:51] [Rank 0] Group 6 FTA: 0.3400
+[2025-09-05 19:27:51] [Rank 0] Group 7 FTA: 0.2300
+[2025-09-05 19:27:51] [Rank 0] Group 8 FTA: 0.2800
+[2025-09-05 19:27:51] [Rank 0] Group 9 FTA: 0.1900
+[2025-09-05 19:27:51] [Rank 0] Group 10 FTA: 0.2000
+[2025-09-05 19:27:51] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 19:27:51] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 19:27:51] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 19:27:51] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 19:27:51] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:27:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:27:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:27:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:27:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:27:52] [Rank 0] step:3501/10000 train_time:162877ms step_avg:46.52ms
+[2025-09-05 19:27:53] [Rank 0] step:3521/10000 train_time:163542ms step_avg:46.45ms
+[2025-09-05 19:27:54] [Rank 0] step:3541/10000 train_time:164282ms step_avg:46.39ms
+[2025-09-05 19:27:55] [Rank 0] step:3561/10000 train_time:165022ms step_avg:46.34ms
+[2025-09-05 19:27:55] [Rank 0] step:3581/10000 train_time:165762ms step_avg:46.29ms
+[2025-09-05 19:27:56] [Rank 0] step:3601/10000 train_time:166501ms step_avg:46.24ms
+[2025-09-05 19:27:57] [Rank 0] step:3621/10000 train_time:167240ms step_avg:46.19ms
+[2025-09-05 19:27:58] [Rank 0] step:3641/10000 train_time:168608ms step_avg:46.31ms
+[2025-09-05 19:27:59] [Rank 0] step:3661/10000 train_time:169487ms step_avg:46.30ms
+[2025-09-05 19:28:00] [Rank 0] step:3681/10000 train_time:170226ms step_avg:46.24ms
+[2025-09-05 19:28:01] [Rank 0] step:3701/10000 train_time:170966ms step_avg:46.19ms
+[2025-09-05 19:28:01] [Rank 0] step:3721/10000 train_time:171855ms step_avg:46.19ms
+[2025-09-05 19:28:02] [Rank 0] step:3741/10000 train_time:172594ms step_avg:46.14ms
+[2025-09-05 19:28:03] [Rank 0] step:3761/10000 train_time:173334ms step_avg:46.09ms
+[2025-09-05 19:28:04] [Rank 0] step:3781/10000 train_time:174074ms step_avg:46.04ms
+[2025-09-05 19:28:04] [Rank 0] step:3801/10000 train_time:174814ms step_avg:45.99ms
+[2025-09-05 19:28:05] [Rank 0] step:3821/10000 train_time:175553ms step_avg:45.94ms
+[2025-09-05 19:28:06] [Rank 0] step:3841/10000 train_time:176293ms step_avg:45.90ms
+[2025-09-05 19:28:07] [Rank 0] step:3861/10000 train_time:177032ms step_avg:45.85ms
+[2025-09-05 19:28:07] [Rank 0] step:3881/10000 train_time:177771ms step_avg:45.81ms
+[2025-09-05 19:28:08] [Rank 0] step:3901/10000 train_time:178509ms step_avg:45.76ms
+[2025-09-05 19:28:09] [Rank 0] step:3921/10000 train_time:179248ms step_avg:45.71ms
+[2025-09-05 19:28:10] [Rank 0] step:3941/10000 train_time:179988ms step_avg:45.67ms
+[2025-09-05 19:28:10] [Rank 0] step:3961/10000 train_time:180727ms step_avg:45.63ms
+[2025-09-05 19:28:11] [Rank 0] step:3981/10000 train_time:181466ms step_avg:45.58ms
+[2025-09-05 19:28:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:28:12] [Rank 0] PRINT: step:4000/10000 train_loss:1.8211 val_loss:1.7901 train_time:182286ms step_avg:45.57ms
+[2025-09-05 19:28:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:28:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:29:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:29:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:29:34] [Rank 0] Total Loss: 4.4194
+[2025-09-05 19:29:34] [Rank 0] Total FTA (Unweighted): 0.4037
+[2025-09-05 19:29:34] [Rank 0] Total FTA (Weighted): 0.4037
+[2025-09-05 19:29:34] [Rank 0] Group 0 Loss: 3.3492
+[2025-09-05 19:29:34] [Rank 0] Group 1 Loss: 3.2430
+[2025-09-05 19:29:34] [Rank 0] Group 2 Loss: 3.2448
+[2025-09-05 19:29:34] [Rank 0] Group 3 Loss: 3.6178
+[2025-09-05 19:29:34] [Rank 0] Group 4 Loss: 3.7889
+[2025-09-05 19:29:34] [Rank 0] Group 5 Loss: 4.0743
+[2025-09-05 19:29:34] [Rank 0] Group 6 Loss: 4.3370
+[2025-09-05 19:29:34] [Rank 0] Group 7 Loss: 4.6039
+[2025-09-05 19:29:34] [Rank 0] Group 8 Loss: 4.8665
+[2025-09-05 19:29:34] [Rank 0] Group 9 Loss: 4.9693
+[2025-09-05 19:29:34] [Rank 0] Group 10 Loss: 5.0732
+[2025-09-05 19:29:34] [Rank 0] Group 11 Loss: 5.1143
+[2025-09-05 19:29:34] [Rank 0] Group 12 Loss: 5.0551
+[2025-09-05 19:29:34] [Rank 0] Group 13 Loss: 5.1262
+[2025-09-05 19:29:34] [Rank 0] Group 14 Loss: 5.1256
+[2025-09-05 19:29:34] [Rank 0] Group 15 Loss: 5.1219
+[2025-09-05 19:29:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:29:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:29:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:29:34] [Rank 0] Group 3 FTA: 0.5500
+[2025-09-05 19:29:34] [Rank 0] Group 4 FTA: 0.3800
+[2025-09-05 19:29:34] [Rank 0] Group 5 FTA: 0.4200
+[2025-09-05 19:29:34] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 19:29:34] [Rank 0] Group 7 FTA: 0.2500
+[2025-09-05 19:29:34] [Rank 0] Group 8 FTA: 0.2900
+[2025-09-05 19:29:34] [Rank 0] Group 9 FTA: 0.2200
+[2025-09-05 19:29:34] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 19:29:34] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 19:29:34] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 19:29:34] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 19:29:34] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 19:29:34] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 19:29:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:29:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:29:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:29:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:29:36] [Rank 0] step:4001/10000 train_time:182296ms step_avg:45.56ms
+[2025-09-05 19:29:37] [Rank 0] step:4021/10000 train_time:183587ms step_avg:45.66ms
+[2025-09-05 19:29:38] [Rank 0] step:4041/10000 train_time:184327ms step_avg:45.61ms
+[2025-09-05 19:29:39] [Rank 0] step:4061/10000 train_time:185067ms step_avg:45.57ms
+[2025-09-05 19:29:39] [Rank 0] step:4081/10000 train_time:185808ms step_avg:45.53ms
+[2025-09-05 19:29:40] [Rank 0] step:4101/10000 train_time:186547ms step_avg:45.49ms
+[2025-09-05 19:29:41] [Rank 0] step:4121/10000 train_time:187288ms step_avg:45.45ms
+[2025-09-05 19:29:42] [Rank 0] step:4141/10000 train_time:188028ms step_avg:45.41ms
+[2025-09-05 19:29:42] [Rank 0] step:4161/10000 train_time:188769ms step_avg:45.37ms
+[2025-09-05 19:29:43] [Rank 0] step:4181/10000 train_time:189510ms step_avg:45.33ms
+[2025-09-05 19:29:44] [Rank 0] step:4201/10000 train_time:190250ms step_avg:45.29ms
+[2025-09-05 19:29:45] [Rank 0] step:4221/10000 train_time:190990ms step_avg:45.25ms
+[2025-09-05 19:29:45] [Rank 0] step:4241/10000 train_time:191730ms step_avg:45.21ms
+[2025-09-05 19:29:46] [Rank 0] step:4261/10000 train_time:192471ms step_avg:45.17ms
+[2025-09-05 19:29:47] [Rank 0] step:4281/10000 train_time:193211ms step_avg:45.13ms
+[2025-09-05 19:29:48] [Rank 0] step:4301/10000 train_time:193951ms step_avg:45.09ms
+[2025-09-05 19:29:48] [Rank 0] step:4321/10000 train_time:194690ms step_avg:45.06ms
+[2025-09-05 19:29:49] [Rank 0] step:4341/10000 train_time:195429ms step_avg:45.02ms
+[2025-09-05 19:29:50] [Rank 0] step:4361/10000 train_time:196170ms step_avg:44.98ms
+[2025-09-05 19:29:51] [Rank 0] step:4381/10000 train_time:196910ms step_avg:44.95ms
+[2025-09-05 19:29:51] [Rank 0] step:4401/10000 train_time:197650ms step_avg:44.91ms
+[2025-09-05 19:29:52] [Rank 0] step:4421/10000 train_time:198390ms step_avg:44.87ms
+[2025-09-05 19:29:53] [Rank 0] step:4441/10000 train_time:199130ms step_avg:44.84ms
+[2025-09-05 19:29:54] [Rank 0] step:4461/10000 train_time:199870ms step_avg:44.80ms
+[2025-09-05 19:29:54] [Rank 0] step:4481/10000 train_time:200610ms step_avg:44.77ms
+[2025-09-05 19:29:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:29:55] [Rank 0] PRINT: step:4500/10000 train_loss:1.7833 val_loss:1.7594 train_time:201431ms step_avg:44.76ms
+[2025-09-05 19:29:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:29:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:31:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:31:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:31:17] [Rank 0] Total Loss: 4.5723
+[2025-09-05 19:31:17] [Rank 0] Total FTA (Unweighted): 0.3944
+[2025-09-05 19:31:17] [Rank 0] Total FTA (Weighted): 0.3944
+[2025-09-05 19:31:17] [Rank 0] Group 0 Loss: 3.5368
+[2025-09-05 19:31:17] [Rank 0] Group 1 Loss: 3.4223
+[2025-09-05 19:31:17] [Rank 0] Group 2 Loss: 3.4430
+[2025-09-05 19:31:17] [Rank 0] Group 3 Loss: 3.7739
+[2025-09-05 19:31:17] [Rank 0] Group 4 Loss: 4.0206
+[2025-09-05 19:31:17] [Rank 0] Group 5 Loss: 4.2331
+[2025-09-05 19:31:17] [Rank 0] Group 6 Loss: 4.4884
+[2025-09-05 19:31:17] [Rank 0] Group 7 Loss: 4.7169
+[2025-09-05 19:31:17] [Rank 0] Group 8 Loss: 4.9924
+[2025-09-05 19:31:17] [Rank 0] Group 9 Loss: 5.1282
+[2025-09-05 19:31:17] [Rank 0] Group 10 Loss: 5.2471
+[2025-09-05 19:31:17] [Rank 0] Group 11 Loss: 5.2371
+[2025-09-05 19:31:17] [Rank 0] Group 12 Loss: 5.1741
+[2025-09-05 19:31:17] [Rank 0] Group 13 Loss: 5.2115
+[2025-09-05 19:31:17] [Rank 0] Group 14 Loss: 5.2677
+[2025-09-05 19:31:17] [Rank 0] Group 15 Loss: 5.2646
+[2025-09-05 19:31:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:31:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:31:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:31:17] [Rank 0] Group 3 FTA: 0.4900
+[2025-09-05 19:31:17] [Rank 0] Group 4 FTA: 0.3800
+[2025-09-05 19:31:17] [Rank 0] Group 5 FTA: 0.3900
+[2025-09-05 19:31:17] [Rank 0] Group 6 FTA: 0.3500
+[2025-09-05 19:31:17] [Rank 0] Group 7 FTA: 0.2600
+[2025-09-05 19:31:17] [Rank 0] Group 8 FTA: 0.2800
+[2025-09-05 19:31:17] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 19:31:17] [Rank 0] Group 10 FTA: 0.2300
+[2025-09-05 19:31:17] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 19:31:17] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 19:31:17] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 19:31:17] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 19:31:17] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:31:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:31:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:31:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:31:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:31:18] [Rank 0] step:4501/10000 train_time:201440ms step_avg:44.75ms
+[2025-09-05 19:31:19] [Rank 0] step:4521/10000 train_time:202111ms step_avg:44.70ms
+[2025-09-05 19:31:20] [Rank 0] step:4541/10000 train_time:202850ms step_avg:44.67ms
+[2025-09-05 19:31:21] [Rank 0] step:4561/10000 train_time:203589ms step_avg:44.64ms
+[2025-09-05 19:31:21] [Rank 0] step:4581/10000 train_time:204330ms step_avg:44.60ms
+[2025-09-05 19:31:22] [Rank 0] step:4601/10000 train_time:205070ms step_avg:44.57ms
+[2025-09-05 19:31:23] [Rank 0] step:4621/10000 train_time:205809ms step_avg:44.54ms
+[2025-09-05 19:31:24] [Rank 0] step:4641/10000 train_time:206550ms step_avg:44.51ms
+[2025-09-05 19:31:24] [Rank 0] step:4661/10000 train_time:207291ms step_avg:44.47ms
+[2025-09-05 19:31:25] [Rank 0] step:4681/10000 train_time:208032ms step_avg:44.44ms
+[2025-09-05 19:31:26] [Rank 0] step:4701/10000 train_time:208772ms step_avg:44.41ms
+[2025-09-05 19:31:27] [Rank 0] step:4721/10000 train_time:209512ms step_avg:44.38ms
+[2025-09-05 19:31:27] [Rank 0] step:4741/10000 train_time:210255ms step_avg:44.35ms
+[2025-09-05 19:31:28] [Rank 0] step:4761/10000 train_time:210994ms step_avg:44.32ms
+[2025-09-05 19:31:29] [Rank 0] step:4781/10000 train_time:211734ms step_avg:44.29ms
+[2025-09-05 19:31:30] [Rank 0] step:4801/10000 train_time:212474ms step_avg:44.26ms
+[2025-09-05 19:31:30] [Rank 0] step:4821/10000 train_time:213213ms step_avg:44.23ms
+[2025-09-05 19:31:31] [Rank 0] step:4841/10000 train_time:214262ms step_avg:44.26ms
+[2025-09-05 19:31:32] [Rank 0] step:4861/10000 train_time:215002ms step_avg:44.23ms
+[2025-09-05 19:31:33] [Rank 0] step:4881/10000 train_time:215741ms step_avg:44.20ms
+[2025-09-05 19:31:34] [Rank 0] step:4901/10000 train_time:216481ms step_avg:44.17ms
+[2025-09-05 19:31:34] [Rank 0] step:4921/10000 train_time:217221ms step_avg:44.14ms
+[2025-09-05 19:31:35] [Rank 0] step:4941/10000 train_time:217960ms step_avg:44.11ms
+[2025-09-05 19:31:36] [Rank 0] step:4961/10000 train_time:218700ms step_avg:44.08ms
+[2025-09-05 19:31:36] [Rank 0] step:4981/10000 train_time:219439ms step_avg:44.06ms
+[2025-09-05 19:31:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:31:38] [Rank 0] PRINT: step:5000/10000 train_loss:1.7524 val_loss:1.7294 train_time:220259ms step_avg:44.05ms
+[2025-09-05 19:31:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:31:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:32:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:32:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:32:59] [Rank 0] Total Loss: 4.5232
+[2025-09-05 19:32:59] [Rank 0] Total FTA (Unweighted): 0.4275
+[2025-09-05 19:32:59] [Rank 0] Total FTA (Weighted): 0.4275
+[2025-09-05 19:32:59] [Rank 0] Group 0 Loss: 3.5949
+[2025-09-05 19:32:59] [Rank 0] Group 1 Loss: 3.4466
+[2025-09-05 19:32:59] [Rank 0] Group 2 Loss: 3.4296
+[2025-09-05 19:32:59] [Rank 0] Group 3 Loss: 3.7200
+[2025-09-05 19:32:59] [Rank 0] Group 4 Loss: 3.9716
+[2025-09-05 19:33:00] [Rank 0] Group 5 Loss: 4.1570
+[2025-09-05 19:33:00] [Rank 0] Group 6 Loss: 4.4216
+[2025-09-05 19:33:00] [Rank 0] Group 7 Loss: 4.6484
+[2025-09-05 19:33:00] [Rank 0] Group 8 Loss: 4.9051
+[2025-09-05 19:33:00] [Rank 0] Group 9 Loss: 5.0506
+[2025-09-05 19:33:00] [Rank 0] Group 10 Loss: 5.1894
+[2025-09-05 19:33:00] [Rank 0] Group 11 Loss: 5.1790
+[2025-09-05 19:33:00] [Rank 0] Group 12 Loss: 5.1199
+[2025-09-05 19:33:00] [Rank 0] Group 13 Loss: 5.1624
+[2025-09-05 19:33:00] [Rank 0] Group 14 Loss: 5.1955
+[2025-09-05 19:33:00] [Rank 0] Group 15 Loss: 5.1794
+[2025-09-05 19:33:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:33:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:33:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:33:00] [Rank 0] Group 3 FTA: 0.6300
+[2025-09-05 19:33:00] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 19:33:00] [Rank 0] Group 5 FTA: 0.4500
+[2025-09-05 19:33:00] [Rank 0] Group 6 FTA: 0.3700
+[2025-09-05 19:33:00] [Rank 0] Group 7 FTA: 0.2900
+[2025-09-05 19:33:00] [Rank 0] Group 8 FTA: 0.3300
+[2025-09-05 19:33:00] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 19:33:00] [Rank 0] Group 10 FTA: 0.2800
+[2025-09-05 19:33:00] [Rank 0] Group 11 FTA: 0.2400
+[2025-09-05 19:33:00] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 19:33:00] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 19:33:00] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 19:33:00] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:33:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:33:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:33:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:33:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:33:01] [Rank 0] step:5001/10000 train_time:220269ms step_avg:44.04ms
+[2025-09-05 19:33:02] [Rank 0] step:5021/10000 train_time:220943ms step_avg:44.00ms
+[2025-09-05 19:33:03] [Rank 0] step:5041/10000 train_time:221683ms step_avg:43.98ms
+[2025-09-05 19:33:03] [Rank 0] step:5061/10000 train_time:222423ms step_avg:43.95ms
+[2025-09-05 19:33:04] [Rank 0] step:5081/10000 train_time:223163ms step_avg:43.92ms
+[2025-09-05 19:33:05] [Rank 0] step:5101/10000 train_time:223903ms step_avg:43.89ms
+[2025-09-05 19:33:06] [Rank 0] step:5121/10000 train_time:224644ms step_avg:43.87ms
+[2025-09-05 19:33:06] [Rank 0] step:5141/10000 train_time:225385ms step_avg:43.84ms
+[2025-09-05 19:33:07] [Rank 0] step:5161/10000 train_time:226125ms step_avg:43.81ms
+[2025-09-05 19:33:08] [Rank 0] step:5181/10000 train_time:226864ms step_avg:43.79ms
+[2025-09-05 19:33:09] [Rank 0] step:5201/10000 train_time:227604ms step_avg:43.76ms
+[2025-09-05 19:33:09] [Rank 0] step:5221/10000 train_time:228343ms step_avg:43.74ms
+[2025-09-05 19:33:10] [Rank 0] step:5241/10000 train_time:229082ms step_avg:43.71ms
+[2025-09-05 19:33:11] [Rank 0] step:5261/10000 train_time:229821ms step_avg:43.68ms
+[2025-09-05 19:33:11] [Rank 0] step:5281/10000 train_time:230561ms step_avg:43.66ms
+[2025-09-05 19:33:12] [Rank 0] step:5301/10000 train_time:231300ms step_avg:43.63ms
+[2025-09-05 19:33:13] [Rank 0] step:5321/10000 train_time:232040ms step_avg:43.61ms
+[2025-09-05 19:33:14] [Rank 0] step:5341/10000 train_time:232778ms step_avg:43.58ms
+[2025-09-05 19:33:14] [Rank 0] step:5361/10000 train_time:233516ms step_avg:43.56ms
+[2025-09-05 19:33:15] [Rank 0] step:5381/10000 train_time:234404ms step_avg:43.56ms
+[2025-09-05 19:33:16] [Rank 0] step:5401/10000 train_time:235144ms step_avg:43.54ms
+[2025-09-05 19:33:17] [Rank 0] step:5421/10000 train_time:235883ms step_avg:43.51ms
+[2025-09-05 19:33:18] [Rank 0] step:5441/10000 train_time:236768ms step_avg:43.52ms
+[2025-09-05 19:33:18] [Rank 0] step:5461/10000 train_time:237507ms step_avg:43.49ms
+[2025-09-05 19:33:19] [Rank 0] step:5481/10000 train_time:238246ms step_avg:43.47ms
+[2025-09-05 19:33:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:33:20] [Rank 0] PRINT: step:5500/10000 train_loss:1.7300 val_loss:1.7121 train_time:239066ms step_avg:43.47ms
+[2025-09-05 19:33:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:33:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:34:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:34:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:34:41] [Rank 0] Total Loss: 4.5881
+[2025-09-05 19:34:41] [Rank 0] Total FTA (Unweighted): 0.4269
+[2025-09-05 19:34:41] [Rank 0] Total FTA (Weighted): 0.4269
+[2025-09-05 19:34:41] [Rank 0] Group 0 Loss: 3.5866
+[2025-09-05 19:34:41] [Rank 0] Group 1 Loss: 3.5235
+[2025-09-05 19:34:41] [Rank 0] Group 2 Loss: 3.4839
+[2025-09-05 19:34:41] [Rank 0] Group 3 Loss: 3.8459
+[2025-09-05 19:34:41] [Rank 0] Group 4 Loss: 4.0473
+[2025-09-05 19:34:41] [Rank 0] Group 5 Loss: 4.2306
+[2025-09-05 19:34:41] [Rank 0] Group 6 Loss: 4.4788
+[2025-09-05 19:34:41] [Rank 0] Group 7 Loss: 4.7105
+[2025-09-05 19:34:41] [Rank 0] Group 8 Loss: 4.9943
+[2025-09-05 19:34:41] [Rank 0] Group 9 Loss: 5.1315
+[2025-09-05 19:34:41] [Rank 0] Group 10 Loss: 5.2372
+[2025-09-05 19:34:41] [Rank 0] Group 11 Loss: 5.2426
+[2025-09-05 19:34:41] [Rank 0] Group 12 Loss: 5.1900
+[2025-09-05 19:34:41] [Rank 0] Group 13 Loss: 5.2182
+[2025-09-05 19:34:41] [Rank 0] Group 14 Loss: 5.2710
+[2025-09-05 19:34:41] [Rank 0] Group 15 Loss: 5.2170
+[2025-09-05 19:34:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:34:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:34:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:34:41] [Rank 0] Group 3 FTA: 0.5300
+[2025-09-05 19:34:41] [Rank 0] Group 4 FTA: 0.3800
+[2025-09-05 19:34:41] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 19:34:41] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 19:34:41] [Rank 0] Group 7 FTA: 0.2900
+[2025-09-05 19:34:41] [Rank 0] Group 8 FTA: 0.3300
+[2025-09-05 19:34:41] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 19:34:41] [Rank 0] Group 10 FTA: 0.3200
+[2025-09-05 19:34:41] [Rank 0] Group 11 FTA: 0.2600
+[2025-09-05 19:34:41] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 19:34:41] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 19:34:41] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 19:34:41] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 19:34:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:34:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:34:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:34:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:34:43] [Rank 0] step:5501/10000 train_time:239077ms step_avg:43.46ms
+[2025-09-05 19:34:43] [Rank 0] step:5521/10000 train_time:239758ms step_avg:43.43ms
+[2025-09-05 19:34:44] [Rank 0] step:5541/10000 train_time:240498ms step_avg:43.40ms
+[2025-09-05 19:34:45] [Rank 0] step:5561/10000 train_time:241238ms step_avg:43.38ms
+[2025-09-05 19:34:46] [Rank 0] step:5581/10000 train_time:241978ms step_avg:43.36ms
+[2025-09-05 19:34:46] [Rank 0] step:5601/10000 train_time:242718ms step_avg:43.33ms
+[2025-09-05 19:34:47] [Rank 0] step:5621/10000 train_time:243458ms step_avg:43.31ms
+[2025-09-05 19:34:48] [Rank 0] step:5641/10000 train_time:244813ms step_avg:43.40ms
+[2025-09-05 19:34:49] [Rank 0] step:5661/10000 train_time:245553ms step_avg:43.38ms
+[2025-09-05 19:34:50] [Rank 0] step:5681/10000 train_time:246294ms step_avg:43.35ms
+[2025-09-05 19:34:51] [Rank 0] step:5701/10000 train_time:247034ms step_avg:43.33ms
+[2025-09-05 19:34:51] [Rank 0] step:5721/10000 train_time:247774ms step_avg:43.31ms
+[2025-09-05 19:34:52] [Rank 0] step:5741/10000 train_time:248514ms step_avg:43.29ms
+[2025-09-05 19:34:53] [Rank 0] step:5761/10000 train_time:249254ms step_avg:43.27ms
+[2025-09-05 19:34:54] [Rank 0] step:5781/10000 train_time:249993ms step_avg:43.24ms
+[2025-09-05 19:34:54] [Rank 0] step:5801/10000 train_time:250734ms step_avg:43.22ms
+[2025-09-05 19:34:55] [Rank 0] step:5821/10000 train_time:251474ms step_avg:43.20ms
+[2025-09-05 19:34:56] [Rank 0] step:5841/10000 train_time:252214ms step_avg:43.18ms
+[2025-09-05 19:34:57] [Rank 0] step:5861/10000 train_time:252953ms step_avg:43.16ms
+[2025-09-05 19:34:57] [Rank 0] step:5881/10000 train_time:253693ms step_avg:43.14ms
+[2025-09-05 19:34:58] [Rank 0] step:5901/10000 train_time:254537ms step_avg:43.13ms
+[2025-09-05 19:34:59] [Rank 0] step:5921/10000 train_time:255277ms step_avg:43.11ms
+[2025-09-05 19:35:00] [Rank 0] step:5941/10000 train_time:256016ms step_avg:43.09ms
+[2025-09-05 19:35:00] [Rank 0] step:5961/10000 train_time:256755ms step_avg:43.07ms
+[2025-09-05 19:35:01] [Rank 0] step:5981/10000 train_time:257495ms step_avg:43.05ms
+[2025-09-05 19:35:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:35:02] [Rank 0] PRINT: step:6000/10000 train_loss:1.7112 val_loss:1.6935 train_time:258316ms step_avg:43.05ms
+[2025-09-05 19:35:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:35:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:36:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:36:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:36:23] [Rank 0] Total Loss: 4.4447
+[2025-09-05 19:36:23] [Rank 0] Total FTA (Unweighted): 0.4481
+[2025-09-05 19:36:23] [Rank 0] Total FTA (Weighted): 0.4481
+[2025-09-05 19:36:23] [Rank 0] Group 0 Loss: 3.5332
+[2025-09-05 19:36:23] [Rank 0] Group 1 Loss: 3.3977
+[2025-09-05 19:36:23] [Rank 0] Group 2 Loss: 3.4053
+[2025-09-05 19:36:23] [Rank 0] Group 3 Loss: 3.6854
+[2025-09-05 19:36:23] [Rank 0] Group 4 Loss: 3.8677
+[2025-09-05 19:36:23] [Rank 0] Group 5 Loss: 4.0782
+[2025-09-05 19:36:23] [Rank 0] Group 6 Loss: 4.3662
+[2025-09-05 19:36:23] [Rank 0] Group 7 Loss: 4.5609
+[2025-09-05 19:36:23] [Rank 0] Group 8 Loss: 4.8221
+[2025-09-05 19:36:23] [Rank 0] Group 9 Loss: 4.9483
+[2025-09-05 19:36:23] [Rank 0] Group 10 Loss: 5.0894
+[2025-09-05 19:36:23] [Rank 0] Group 11 Loss: 5.0914
+[2025-09-05 19:36:23] [Rank 0] Group 12 Loss: 5.0128
+[2025-09-05 19:36:23] [Rank 0] Group 13 Loss: 5.0922
+[2025-09-05 19:36:23] [Rank 0] Group 14 Loss: 5.1001
+[2025-09-05 19:36:23] [Rank 0] Group 15 Loss: 5.0648
+[2025-09-05 19:36:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:36:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:36:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:36:23] [Rank 0] Group 3 FTA: 0.7400
+[2025-09-05 19:36:23] [Rank 0] Group 4 FTA: 0.3900
+[2025-09-05 19:36:23] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 19:36:23] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 19:36:23] [Rank 0] Group 7 FTA: 0.3100
+[2025-09-05 19:36:23] [Rank 0] Group 8 FTA: 0.3400
+[2025-09-05 19:36:23] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 19:36:23] [Rank 0] Group 10 FTA: 0.3100
+[2025-09-05 19:36:23] [Rank 0] Group 11 FTA: 0.2700
+[2025-09-05 19:36:23] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 19:36:23] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 19:36:23] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 19:36:23] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 19:36:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:36:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:36:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:36:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:36:24] [Rank 0] step:6001/10000 train_time:258327ms step_avg:43.05ms
+[2025-09-05 19:36:25] [Rank 0] step:6021/10000 train_time:259621ms step_avg:43.12ms
+[2025-09-05 19:36:26] [Rank 0] step:6041/10000 train_time:260361ms step_avg:43.10ms
+[2025-09-05 19:36:27] [Rank 0] step:6061/10000 train_time:261101ms step_avg:43.08ms
+[2025-09-05 19:36:28] [Rank 0] step:6081/10000 train_time:262051ms step_avg:43.09ms
+[2025-09-05 19:36:29] [Rank 0] step:6101/10000 train_time:262790ms step_avg:43.07ms
+[2025-09-05 19:36:29] [Rank 0] step:6121/10000 train_time:263529ms step_avg:43.05ms
+[2025-09-05 19:36:30] [Rank 0] step:6141/10000 train_time:264269ms step_avg:43.03ms
+[2025-09-05 19:36:31] [Rank 0] step:6161/10000 train_time:265008ms step_avg:43.01ms
+[2025-09-05 19:36:32] [Rank 0] step:6181/10000 train_time:265748ms step_avg:42.99ms
+[2025-09-05 19:36:32] [Rank 0] step:6201/10000 train_time:266488ms step_avg:42.97ms
+[2025-09-05 19:36:33] [Rank 0] step:6221/10000 train_time:267227ms step_avg:42.96ms
+[2025-09-05 19:36:34] [Rank 0] step:6241/10000 train_time:267966ms step_avg:42.94ms
+[2025-09-05 19:36:35] [Rank 0] step:6261/10000 train_time:268707ms step_avg:42.92ms
+[2025-09-05 19:36:35] [Rank 0] step:6281/10000 train_time:269446ms step_avg:42.90ms
+[2025-09-05 19:36:36] [Rank 0] step:6301/10000 train_time:270186ms step_avg:42.88ms
+[2025-09-05 19:36:37] [Rank 0] step:6321/10000 train_time:270926ms step_avg:42.86ms
+[2025-09-05 19:36:38] [Rank 0] step:6341/10000 train_time:271666ms step_avg:42.84ms
+[2025-09-05 19:36:38] [Rank 0] step:6361/10000 train_time:272406ms step_avg:42.82ms
+[2025-09-05 19:36:39] [Rank 0] step:6381/10000 train_time:273147ms step_avg:42.81ms
+[2025-09-05 19:36:40] [Rank 0] step:6401/10000 train_time:273886ms step_avg:42.79ms
+[2025-09-05 19:36:41] [Rank 0] step:6421/10000 train_time:274627ms step_avg:42.77ms
+[2025-09-05 19:36:41] [Rank 0] step:6441/10000 train_time:275366ms step_avg:42.75ms
+[2025-09-05 19:36:42] [Rank 0] step:6461/10000 train_time:276106ms step_avg:42.73ms
+[2025-09-05 19:36:43] [Rank 0] step:6481/10000 train_time:276846ms step_avg:42.72ms
+[2025-09-05 19:36:43] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:36:44] [Rank 0] PRINT: step:6500/10000 train_loss:1.6975 val_loss:1.6801 train_time:277667ms step_avg:42.72ms
+[2025-09-05 19:36:44] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:36:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:38:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:38:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:38:06] [Rank 0] Total Loss: 4.4762
+[2025-09-05 19:38:06] [Rank 0] Total FTA (Unweighted): 0.4569
+[2025-09-05 19:38:06] [Rank 0] Total FTA (Weighted): 0.4569
+[2025-09-05 19:38:06] [Rank 0] Group 0 Loss: 3.6024
+[2025-09-05 19:38:06] [Rank 0] Group 1 Loss: 3.3808
+[2025-09-05 19:38:06] [Rank 0] Group 2 Loss: 3.4293
+[2025-09-05 19:38:06] [Rank 0] Group 3 Loss: 3.7797
+[2025-09-05 19:38:06] [Rank 0] Group 4 Loss: 3.9147
+[2025-09-05 19:38:06] [Rank 0] Group 5 Loss: 4.1185
+[2025-09-05 19:38:06] [Rank 0] Group 6 Loss: 4.3790
+[2025-09-05 19:38:06] [Rank 0] Group 7 Loss: 4.5811
+[2025-09-05 19:38:06] [Rank 0] Group 8 Loss: 4.8536
+[2025-09-05 19:38:06] [Rank 0] Group 9 Loss: 4.9903
+[2025-09-05 19:38:06] [Rank 0] Group 10 Loss: 5.1197
+[2025-09-05 19:38:06] [Rank 0] Group 11 Loss: 5.1116
+[2025-09-05 19:38:06] [Rank 0] Group 12 Loss: 5.0448
+[2025-09-05 19:38:06] [Rank 0] Group 13 Loss: 5.0799
+[2025-09-05 19:38:06] [Rank 0] Group 14 Loss: 5.1407
+[2025-09-05 19:38:06] [Rank 0] Group 15 Loss: 5.0924
+[2025-09-05 19:38:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:38:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:38:06] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:38:06] [Rank 0] Group 3 FTA: 0.7700
+[2025-09-05 19:38:06] [Rank 0] Group 4 FTA: 0.4500
+[2025-09-05 19:38:06] [Rank 0] Group 5 FTA: 0.4900
+[2025-09-05 19:38:06] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 19:38:06] [Rank 0] Group 7 FTA: 0.3100
+[2025-09-05 19:38:06] [Rank 0] Group 8 FTA: 0.3700
+[2025-09-05 19:38:06] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 19:38:06] [Rank 0] Group 10 FTA: 0.3300
+[2025-09-05 19:38:06] [Rank 0] Group 11 FTA: 0.2700
+[2025-09-05 19:38:06] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 19:38:06] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 19:38:06] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 19:38:06] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 19:38:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:38:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:38:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:38:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:38:08] [Rank 0] step:6501/10000 train_time:277678ms step_avg:42.71ms
+[2025-09-05 19:38:08] [Rank 0] step:6521/10000 train_time:278344ms step_avg:42.68ms
+[2025-09-05 19:38:09] [Rank 0] step:6541/10000 train_time:279084ms step_avg:42.67ms
+[2025-09-05 19:38:10] [Rank 0] step:6561/10000 train_time:279824ms step_avg:42.65ms
+[2025-09-05 19:38:11] [Rank 0] step:6581/10000 train_time:280563ms step_avg:42.63ms
+[2025-09-05 19:38:11] [Rank 0] step:6601/10000 train_time:281303ms step_avg:42.62ms
+[2025-09-05 19:38:12] [Rank 0] step:6621/10000 train_time:282043ms step_avg:42.60ms
+[2025-09-05 19:38:13] [Rank 0] step:6641/10000 train_time:282782ms step_avg:42.58ms
+[2025-09-05 19:38:14] [Rank 0] step:6661/10000 train_time:283522ms step_avg:42.56ms
+[2025-09-05 19:38:14] [Rank 0] step:6681/10000 train_time:284262ms step_avg:42.55ms
+[2025-09-05 19:38:15] [Rank 0] step:6701/10000 train_time:285002ms step_avg:42.53ms
+[2025-09-05 19:38:16] [Rank 0] step:6721/10000 train_time:285746ms step_avg:42.52ms
+[2025-09-05 19:38:17] [Rank 0] step:6741/10000 train_time:286486ms step_avg:42.50ms
+[2025-09-05 19:38:17] [Rank 0] step:6761/10000 train_time:287227ms step_avg:42.48ms
+[2025-09-05 19:38:18] [Rank 0] step:6781/10000 train_time:287967ms step_avg:42.47ms
+[2025-09-05 19:38:19] [Rank 0] step:6801/10000 train_time:288706ms step_avg:42.45ms
+[2025-09-05 19:38:20] [Rank 0] step:6821/10000 train_time:289445ms step_avg:42.43ms
+[2025-09-05 19:38:21] [Rank 0] step:6841/10000 train_time:290791ms step_avg:42.51ms
+[2025-09-05 19:38:22] [Rank 0] step:6861/10000 train_time:291531ms step_avg:42.49ms
+[2025-09-05 19:38:22] [Rank 0] step:6881/10000 train_time:292271ms step_avg:42.48ms
+[2025-09-05 19:38:23] [Rank 0] step:6901/10000 train_time:293012ms step_avg:42.46ms
+[2025-09-05 19:38:24] [Rank 0] step:6921/10000 train_time:293752ms step_avg:42.44ms
+[2025-09-05 19:38:25] [Rank 0] step:6941/10000 train_time:294492ms step_avg:42.43ms
+[2025-09-05 19:38:25] [Rank 0] step:6961/10000 train_time:295232ms step_avg:42.41ms
+[2025-09-05 19:38:26] [Rank 0] step:6981/10000 train_time:295972ms step_avg:42.40ms
+[2025-09-05 19:38:27] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:38:27] [Rank 0] PRINT: step:7000/10000 train_loss:1.6833 val_loss:1.6686 train_time:296793ms step_avg:42.40ms
+[2025-09-05 19:38:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:38:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:39:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:39:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:39:48] [Rank 0] Total Loss: 4.3632
+[2025-09-05 19:39:48] [Rank 0] Total FTA (Unweighted): 0.4638
+[2025-09-05 19:39:48] [Rank 0] Total FTA (Weighted): 0.4637
+[2025-09-05 19:39:48] [Rank 0] Group 0 Loss: 3.4072
+[2025-09-05 19:39:48] [Rank 0] Group 1 Loss: 3.2771
+[2025-09-05 19:39:48] [Rank 0] Group 2 Loss: 3.3405
+[2025-09-05 19:39:48] [Rank 0] Group 3 Loss: 3.6342
+[2025-09-05 19:39:48] [Rank 0] Group 4 Loss: 3.8105
+[2025-09-05 19:39:48] [Rank 0] Group 5 Loss: 4.0079
+[2025-09-05 19:39:48] [Rank 0] Group 6 Loss: 4.2814
+[2025-09-05 19:39:48] [Rank 0] Group 7 Loss: 4.4736
+[2025-09-05 19:39:48] [Rank 0] Group 8 Loss: 4.7410
+[2025-09-05 19:39:48] [Rank 0] Group 9 Loss: 4.8677
+[2025-09-05 19:39:48] [Rank 0] Group 10 Loss: 4.9862
+[2025-09-05 19:39:48] [Rank 0] Group 11 Loss: 4.9953
+[2025-09-05 19:39:48] [Rank 0] Group 12 Loss: 4.9593
+[2025-09-05 19:39:48] [Rank 0] Group 13 Loss: 5.0193
+[2025-09-05 19:39:48] [Rank 0] Group 14 Loss: 5.0248
+[2025-09-05 19:39:48] [Rank 0] Group 15 Loss: 4.9850
+[2025-09-05 19:39:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:39:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:39:48] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:39:48] [Rank 0] Group 3 FTA: 0.7800
+[2025-09-05 19:39:48] [Rank 0] Group 4 FTA: 0.4900
+[2025-09-05 19:39:48] [Rank 0] Group 5 FTA: 0.4500
+[2025-09-05 19:39:48] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 19:39:48] [Rank 0] Group 7 FTA: 0.3400
+[2025-09-05 19:39:48] [Rank 0] Group 8 FTA: 0.3400
+[2025-09-05 19:39:48] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 19:39:48] [Rank 0] Group 10 FTA: 0.3500
+[2025-09-05 19:39:48] [Rank 0] Group 11 FTA: 0.2700
+[2025-09-05 19:39:48] [Rank 0] Group 12 FTA: 0.2600
+[2025-09-05 19:39:48] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 19:39:48] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 19:39:48] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 19:39:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:39:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:39:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:39:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:39:50] [Rank 0] step:7001/10000 train_time:296803ms step_avg:42.39ms
+[2025-09-05 19:39:50] [Rank 0] step:7021/10000 train_time:297481ms step_avg:42.37ms
+[2025-09-05 19:39:51] [Rank 0] step:7041/10000 train_time:298220ms step_avg:42.35ms
+[2025-09-05 19:39:52] [Rank 0] step:7061/10000 train_time:298960ms step_avg:42.34ms
+[2025-09-05 19:39:53] [Rank 0] step:7081/10000 train_time:299700ms step_avg:42.32ms
+[2025-09-05 19:39:53] [Rank 0] step:7101/10000 train_time:300440ms step_avg:42.31ms
+[2025-09-05 19:39:54] [Rank 0] step:7121/10000 train_time:301179ms step_avg:42.29ms
+[2025-09-05 19:39:55] [Rank 0] step:7141/10000 train_time:301920ms step_avg:42.28ms
+[2025-09-05 19:39:56] [Rank 0] step:7161/10000 train_time:302660ms step_avg:42.27ms
+[2025-09-05 19:39:56] [Rank 0] step:7181/10000 train_time:303400ms step_avg:42.25ms
+[2025-09-05 19:39:57] [Rank 0] step:7201/10000 train_time:304139ms step_avg:42.24ms
+[2025-09-05 19:39:58] [Rank 0] step:7221/10000 train_time:304879ms step_avg:42.22ms
+[2025-09-05 19:39:59] [Rank 0] step:7241/10000 train_time:305618ms step_avg:42.21ms
+[2025-09-05 19:39:59] [Rank 0] step:7261/10000 train_time:306358ms step_avg:42.19ms
+[2025-09-05 19:40:00] [Rank 0] step:7281/10000 train_time:307097ms step_avg:42.18ms
+[2025-09-05 19:40:01] [Rank 0] step:7301/10000 train_time:307837ms step_avg:42.16ms
+[2025-09-05 19:40:02] [Rank 0] step:7321/10000 train_time:308590ms step_avg:42.15ms
+[2025-09-05 19:40:02] [Rank 0] step:7341/10000 train_time:309330ms step_avg:42.14ms
+[2025-09-05 19:40:03] [Rank 0] step:7361/10000 train_time:310070ms step_avg:42.12ms
+[2025-09-05 19:40:04] [Rank 0] step:7381/10000 train_time:310810ms step_avg:42.11ms
+[2025-09-05 19:40:05] [Rank 0] step:7401/10000 train_time:311549ms step_avg:42.10ms
+[2025-09-05 19:40:05] [Rank 0] step:7421/10000 train_time:312290ms step_avg:42.08ms
+[2025-09-05 19:40:06] [Rank 0] step:7441/10000 train_time:313030ms step_avg:42.07ms
+[2025-09-05 19:40:07] [Rank 0] step:7461/10000 train_time:313770ms step_avg:42.05ms
+[2025-09-05 19:40:08] [Rank 0] step:7481/10000 train_time:314509ms step_avg:42.04ms
+[2025-09-05 19:40:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:40:09] [Rank 0] PRINT: step:7500/10000 train_loss:1.6715 val_loss:1.6571 train_time:315330ms step_avg:42.04ms
+[2025-09-05 19:40:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:40:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:41:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:41:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:41:30] [Rank 0] Total Loss: 4.3467
+[2025-09-05 19:41:30] [Rank 0] Total FTA (Unweighted): 0.4738
+[2025-09-05 19:41:30] [Rank 0] Total FTA (Weighted): 0.4738
+[2025-09-05 19:41:30] [Rank 0] Group 0 Loss: 3.3979
+[2025-09-05 19:41:30] [Rank 0] Group 1 Loss: 3.2955
+[2025-09-05 19:41:30] [Rank 0] Group 2 Loss: 3.3374
+[2025-09-05 19:41:30] [Rank 0] Group 3 Loss: 3.6150
+[2025-09-05 19:41:30] [Rank 0] Group 4 Loss: 3.7903
+[2025-09-05 19:41:30] [Rank 0] Group 5 Loss: 3.9762
+[2025-09-05 19:41:30] [Rank 0] Group 6 Loss: 4.2394
+[2025-09-05 19:41:30] [Rank 0] Group 7 Loss: 4.4671
+[2025-09-05 19:41:30] [Rank 0] Group 8 Loss: 4.7224
+[2025-09-05 19:41:30] [Rank 0] Group 9 Loss: 4.8406
+[2025-09-05 19:41:30] [Rank 0] Group 10 Loss: 4.9943
+[2025-09-05 19:41:30] [Rank 0] Group 11 Loss: 4.9684
+[2025-09-05 19:41:30] [Rank 0] Group 12 Loss: 4.9103
+[2025-09-05 19:41:30] [Rank 0] Group 13 Loss: 5.0112
+[2025-09-05 19:41:30] [Rank 0] Group 14 Loss: 5.0138
+[2025-09-05 19:41:30] [Rank 0] Group 15 Loss: 4.9669
+[2025-09-05 19:41:30] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:41:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:41:30] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:41:30] [Rank 0] Group 3 FTA: 0.8600
+[2025-09-05 19:41:30] [Rank 0] Group 4 FTA: 0.5000
+[2025-09-05 19:41:30] [Rank 0] Group 5 FTA: 0.4900
+[2025-09-05 19:41:30] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 19:41:30] [Rank 0] Group 7 FTA: 0.3400
+[2025-09-05 19:41:30] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 19:41:30] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 19:41:30] [Rank 0] Group 10 FTA: 0.3600
+[2025-09-05 19:41:30] [Rank 0] Group 11 FTA: 0.2900
+[2025-09-05 19:41:30] [Rank 0] Group 12 FTA: 0.2500
+[2025-09-05 19:41:30] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 19:41:30] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 19:41:30] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 19:41:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:41:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:41:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:41:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:41:31] [Rank 0] step:7501/10000 train_time:315341ms step_avg:42.04ms
+[2025-09-05 19:41:32] [Rank 0] step:7521/10000 train_time:316016ms step_avg:42.02ms
+[2025-09-05 19:41:33] [Rank 0] step:7541/10000 train_time:316754ms step_avg:42.00ms
+[2025-09-05 19:41:33] [Rank 0] step:7561/10000 train_time:317494ms step_avg:41.99ms
+[2025-09-05 19:41:34] [Rank 0] step:7581/10000 train_time:318233ms step_avg:41.98ms
+[2025-09-05 19:41:35] [Rank 0] step:7601/10000 train_time:318972ms step_avg:41.96ms
+[2025-09-05 19:41:36] [Rank 0] step:7621/10000 train_time:319712ms step_avg:41.95ms
+[2025-09-05 19:41:37] [Rank 0] step:7641/10000 train_time:320452ms step_avg:41.94ms
+[2025-09-05 19:41:38] [Rank 0] step:7661/10000 train_time:321808ms step_avg:42.01ms
+[2025-09-05 19:41:39] [Rank 0] step:7681/10000 train_time:322548ms step_avg:41.99ms
+[2025-09-05 19:41:39] [Rank 0] step:7701/10000 train_time:323287ms step_avg:41.98ms
+[2025-09-05 19:41:40] [Rank 0] step:7721/10000 train_time:324027ms step_avg:41.97ms
+[2025-09-05 19:41:41] [Rank 0] step:7741/10000 train_time:324766ms step_avg:41.95ms
+[2025-09-05 19:41:42] [Rank 0] step:7761/10000 train_time:325638ms step_avg:41.96ms
+[2025-09-05 19:41:42] [Rank 0] step:7781/10000 train_time:326377ms step_avg:41.95ms
+[2025-09-05 19:41:43] [Rank 0] step:7801/10000 train_time:327115ms step_avg:41.93ms
+[2025-09-05 19:41:44] [Rank 0] step:7821/10000 train_time:327994ms step_avg:41.94ms
+[2025-09-05 19:41:45] [Rank 0] step:7841/10000 train_time:328733ms step_avg:41.92ms
+[2025-09-05 19:41:45] [Rank 0] step:7861/10000 train_time:329473ms step_avg:41.91ms
+[2025-09-05 19:41:46] [Rank 0] step:7881/10000 train_time:330213ms step_avg:41.90ms
+[2025-09-05 19:41:47] [Rank 0] step:7901/10000 train_time:330952ms step_avg:41.89ms
+[2025-09-05 19:41:48] [Rank 0] step:7921/10000 train_time:331692ms step_avg:41.87ms
+[2025-09-05 19:41:48] [Rank 0] step:7941/10000 train_time:332432ms step_avg:41.86ms
+[2025-09-05 19:41:49] [Rank 0] step:7961/10000 train_time:333172ms step_avg:41.85ms
+[2025-09-05 19:41:50] [Rank 0] step:7981/10000 train_time:333911ms step_avg:41.84ms
+[2025-09-05 19:41:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:41:51] [Rank 0] PRINT: step:8000/10000 train_loss:1.6627 val_loss:1.6503 train_time:334731ms step_avg:41.84ms
+[2025-09-05 19:41:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:41:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:43:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:43:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:43:12] [Rank 0] Total Loss: 4.3994
+[2025-09-05 19:43:12] [Rank 0] Total FTA (Unweighted): 0.4787
+[2025-09-05 19:43:12] [Rank 0] Total FTA (Weighted): 0.4788
+[2025-09-05 19:43:12] [Rank 0] Group 0 Loss: 3.5405
+[2025-09-05 19:43:12] [Rank 0] Group 1 Loss: 3.3250
+[2025-09-05 19:43:12] [Rank 0] Group 2 Loss: 3.3624
+[2025-09-05 19:43:12] [Rank 0] Group 3 Loss: 3.6704
+[2025-09-05 19:43:12] [Rank 0] Group 4 Loss: 3.8321
+[2025-09-05 19:43:12] [Rank 0] Group 5 Loss: 4.0344
+[2025-09-05 19:43:12] [Rank 0] Group 6 Loss: 4.2651
+[2025-09-05 19:43:12] [Rank 0] Group 7 Loss: 4.5053
+[2025-09-05 19:43:12] [Rank 0] Group 8 Loss: 4.7743
+[2025-09-05 19:43:12] [Rank 0] Group 9 Loss: 4.8875
+[2025-09-05 19:43:12] [Rank 0] Group 10 Loss: 5.0561
+[2025-09-05 19:43:12] [Rank 0] Group 11 Loss: 5.0307
+[2025-09-05 19:43:12] [Rank 0] Group 12 Loss: 4.9757
+[2025-09-05 19:43:12] [Rank 0] Group 13 Loss: 5.0688
+[2025-09-05 19:43:12] [Rank 0] Group 14 Loss: 5.0561
+[2025-09-05 19:43:12] [Rank 0] Group 15 Loss: 5.0068
+[2025-09-05 19:43:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:43:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:43:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 19:43:12] [Rank 0] Group 3 FTA: 0.8600
+[2025-09-05 19:43:12] [Rank 0] Group 4 FTA: 0.5000
+[2025-09-05 19:43:12] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 19:43:12] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 19:43:12] [Rank 0] Group 7 FTA: 0.3600
+[2025-09-05 19:43:12] [Rank 0] Group 8 FTA: 0.3800
+[2025-09-05 19:43:12] [Rank 0] Group 9 FTA: 0.2600
+[2025-09-05 19:43:12] [Rank 0] Group 10 FTA: 0.3500
+[2025-09-05 19:43:12] [Rank 0] Group 11 FTA: 0.2800
+[2025-09-05 19:43:12] [Rank 0] Group 12 FTA: 0.3000
+[2025-09-05 19:43:12] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-05 19:43:12] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 19:43:12] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 19:43:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png
+[2025-09-05 19:43:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png
+[2025-09-05 19:43:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png
+[2025-09-05 19:43:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:43:14] [Rank 0] step:8001/10000 train_time:334741ms step_avg:41.84ms
+[2025-09-05 19:43:15] [Rank 0] step:8021/10000 train_time:336036ms step_avg:41.89ms
+[2025-09-05 19:43:16] [Rank 0] step:8041/10000 train_time:336774ms step_avg:41.88ms
+[2025-09-05 19:43:16] [Rank 0] step:8061/10000 train_time:337514ms step_avg:41.87ms
+[2025-09-05 19:43:17] [Rank 0] step:8081/10000 train_time:338253ms step_avg:41.86ms
+[2025-09-05 19:43:18] [Rank 0] step:8101/10000 train_time:338992ms step_avg:41.85ms
+[2025-09-05 19:43:19] [Rank 0] step:8121/10000 train_time:339731ms step_avg:41.83ms
[Rank 0] step:8121/10000 train_time:339731ms step_avg:41.83ms +[2025-09-05 19:43:19] [Rank 0] step:8141/10000 train_time:340471ms step_avg:41.82ms +[2025-09-05 19:43:19] [Rank 0] step:8141/10000 train_time:340471ms step_avg:41.82ms +[2025-09-05 19:43:20] [Rank 0] step:8161/10000 train_time:341210ms step_avg:41.81ms +[2025-09-05 19:43:20] [Rank 0] step:8161/10000 train_time:341210ms step_avg:41.81ms +[2025-09-05 19:43:21] [Rank 0] step:8181/10000 train_time:341949ms step_avg:41.80ms +[2025-09-05 19:43:21] [Rank 0] step:8181/10000 train_time:341949ms step_avg:41.80ms +[2025-09-05 19:43:22] [Rank 0] step:8201/10000 train_time:342687ms step_avg:41.79ms +[2025-09-05 19:43:22] [Rank 0] step:8201/10000 train_time:342687ms step_avg:41.79ms +[2025-09-05 19:43:22] [Rank 0] step:8221/10000 train_time:343425ms step_avg:41.77ms +[2025-09-05 19:43:22] [Rank 0] step:8221/10000 train_time:343425ms step_avg:41.77ms +[2025-09-05 19:43:23] [Rank 0] step:8241/10000 train_time:344166ms step_avg:41.76ms +[2025-09-05 19:43:23] [Rank 0] step:8241/10000 train_time:344166ms step_avg:41.76ms +[2025-09-05 19:43:24] [Rank 0] step:8261/10000 train_time:344905ms step_avg:41.75ms +[2025-09-05 19:43:24] [Rank 0] step:8261/10000 train_time:344905ms step_avg:41.75ms +[2025-09-05 19:43:25] [Rank 0] step:8281/10000 train_time:345645ms step_avg:41.74ms +[2025-09-05 19:43:25] [Rank 0] step:8281/10000 train_time:345645ms step_avg:41.74ms +[2025-09-05 19:43:25] [Rank 0] step:8301/10000 train_time:346384ms step_avg:41.73ms +[2025-09-05 19:43:25] [Rank 0] step:8301/10000 train_time:346384ms step_avg:41.73ms +[2025-09-05 19:43:26] [Rank 0] step:8321/10000 train_time:347124ms step_avg:41.72ms +[2025-09-05 19:43:26] [Rank 0] step:8321/10000 train_time:347124ms step_avg:41.72ms +[2025-09-05 19:43:27] [Rank 0] step:8341/10000 train_time:347864ms step_avg:41.71ms +[2025-09-05 19:43:27] [Rank 0] step:8341/10000 train_time:347864ms step_avg:41.71ms +[2025-09-05 19:43:28] [Rank 0] step:8361/10000 train_time:348604ms step_avg:41.69ms +[2025-09-05 19:43:28] [Rank 0] step:8361/10000 train_time:348604ms step_avg:41.69ms +[2025-09-05 19:43:28] [Rank 0] step:8381/10000 train_time:349343ms step_avg:41.68ms +[2025-09-05 19:43:28] [Rank 0] step:8381/10000 train_time:349343ms step_avg:41.68ms +[2025-09-05 19:43:29] [Rank 0] step:8401/10000 train_time:350083ms step_avg:41.67ms +[2025-09-05 19:43:29] [Rank 0] step:8401/10000 train_time:350083ms step_avg:41.67ms +[2025-09-05 19:43:30] [Rank 0] step:8421/10000 train_time:350821ms step_avg:41.66ms +[2025-09-05 19:43:30] [Rank 0] step:8421/10000 train_time:350821ms step_avg:41.66ms +[2025-09-05 19:43:30] [Rank 0] step:8441/10000 train_time:351560ms step_avg:41.65ms +[2025-09-05 19:43:30] [Rank 0] step:8441/10000 train_time:351560ms step_avg:41.65ms +[2025-09-05 19:43:31] [Rank 0] step:8461/10000 train_time:352299ms step_avg:41.64ms +[2025-09-05 19:43:31] [Rank 0] step:8461/10000 train_time:352299ms step_avg:41.64ms +[2025-09-05 19:43:32] [Rank 0] step:8481/10000 train_time:353037ms step_avg:41.63ms +[2025-09-05 19:43:32] [Rank 0] step:8481/10000 train_time:353037ms step_avg:41.63ms +[2025-09-05 19:43:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 19:43:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
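The warning above is plain integer arithmetic: 491520 / 65536 = 7.5, so a validation loop that consumes whole batches covers only 7 * 65536 = 458752 tokens and the trailing 32768 are never evaluated. A minimal sketch of the check, using the values from the hyperparameters above (this is an illustration, not the script's literal validation code):

val_tokens, val_batch_size = 491520, 65536        # from the logged hyperparameters
full_batches, remainder = divmod(val_tokens, val_batch_size)  # -> (7, 32768)
if remainder != 0:                                # the last 32768 tokens are skipped
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")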
+[2025-09-05 19:43:33] [Rank 0] PRINT: step:8500/10000 train_loss:1.6568 val_loss:1.6449 train_time:353857ms step_avg:41.63ms +[2025-09-05 19:43:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:43:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:44:54] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:44:54] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:44:54] [Rank 0] Total Loss: 4.3594 +[2025-09-05 19:44:54] [Rank 0] Total FTA (Unweighted): 0.4825 +[2025-09-05 19:44:54] [Rank 0] Total FTA (Weighted): 0.4825
+[2025-09-05 19:44:54] [Rank 0] Group 0 Loss: 3.4843 +[2025-09-05 19:44:54] [Rank 0] Group 1 Loss: 3.3176 +[2025-09-05 19:44:54] [Rank 0] Group 2 Loss: 3.3199 +[2025-09-05 19:44:54] [Rank 0] Group 3 Loss: 3.6343 +[2025-09-05 19:44:54] [Rank 0] Group 4 Loss: 3.8104 +[2025-09-05 19:44:54] [Rank 0] Group 5 Loss: 4.0102 +[2025-09-05 19:44:54] [Rank 0] Group 6 Loss: 4.2455 +[2025-09-05 19:44:54] [Rank 0] Group 7 Loss: 4.4736 +[2025-09-05 19:44:54] [Rank 0] Group 8 Loss: 4.7127 +[2025-09-05 19:44:54] [Rank 0] Group 9 Loss: 4.8335 +[2025-09-05 19:44:54] [Rank 0] Group 10 Loss: 4.9850 +[2025-09-05 19:44:54] [Rank 0] Group 11 Loss: 4.9817 +[2025-09-05 19:44:54] [Rank 0] Group 12 Loss: 4.9443 +[2025-09-05 19:44:54] [Rank 0] Group 13 Loss: 5.0288 +[2025-09-05 19:44:54] [Rank 0] Group 14 Loss: 5.0046 +[2025-09-05 19:44:54] [Rank 0] Group 15 Loss: 4.9633
+[2025-09-05 19:44:54] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:44:54] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:44:54] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:44:54] [Rank 0] Group 3 FTA: 0.8600 +[2025-09-05 19:44:54] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 19:44:54] [Rank 0] Group 5 FTA: 0.5000 +[2025-09-05 19:44:54] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 19:44:54] [Rank 0] Group 7 FTA: 0.3500 +[2025-09-05 19:44:54] [Rank 0] Group 8 FTA: 0.3700 +[2025-09-05 19:44:54] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 19:44:54] [Rank 0] Group 10 FTA: 0.3600 +[2025-09-05 19:44:54] [Rank 0] Group 11 FTA: 0.2800 +[2025-09-05 19:44:54] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 19:44:54] [Rank 0] Group 13 FTA: 0.2400 +[2025-09-05 19:44:54] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 19:44:54] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 19:44:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png +[2025-09-05 19:44:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png +[2025-09-05 19:44:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png +[2025-09-05 19:44:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:44:56] [Rank 0] step:8501/10000 train_time:353867ms step_avg:41.63ms +[2025-09-05 19:44:57] [Rank 0] step:8521/10000 train_time:354543ms step_avg:41.61ms +[2025-09-05 19:44:57] [Rank 0] step:8541/10000 train_time:355283ms step_avg:41.60ms +[2025-09-05 19:44:58] [Rank 0] step:8561/10000 train_time:356023ms step_avg:41.59ms +[2025-09-05 19:44:59] [Rank 0] step:8581/10000 train_time:356763ms step_avg:41.58ms +[2025-09-05 19:45:00] [Rank 0] step:8601/10000 train_time:357504ms step_avg:41.57ms +[2025-09-05 19:45:00] [Rank 0] step:8621/10000 train_time:358245ms step_avg:41.55ms
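To read the detailed-evaluation blocks above: the fixed evaluation set holds 16 groups of 100 QA samples each (1600 total, matching "per_group_k": 100 in the configs), and each group gets a mean loss and an FTA score. A hedged sketch of the aggregation, assuming FTA is a first-token accuracy over 0/1 per-sample scores; summarize_fta, first_token_correct, and group_ids are hypothetical names, not the script's actual variables:

import torch

def summarize_fta(first_token_correct: torch.Tensor, group_ids: torch.Tensor, num_groups: int = 16):
    # Mean of a 0/1 correctness vector within each group of the fixed-eval set.
    per_group = {g: first_token_correct[group_ids == g].float().mean().item()
                 for g in range(num_groups)}
    unweighted = sum(per_group.values()) / num_groups      # mean of the group means
    weighted = first_token_correct.float().mean().item()   # mean over all samples
    return per_group, unweighted, weighted

With exactly 100 samples in every group the two totals coincide up to rounding, which is consistent with the "Unweighted" and "Weighted" lines agreeing at nearly every step of this log.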
+[2025-09-05 19:45:01] [Rank 0] step:8641/10000 train_time:358985ms step_avg:41.54ms +[2025-09-05 19:45:02] [Rank 0] step:8661/10000 train_time:359725ms step_avg:41.53ms +[2025-09-05 19:45:03] [Rank 0] step:8681/10000 train_time:360465ms step_avg:41.52ms +[2025-09-05 19:45:03] [Rank 0] step:8701/10000 train_time:361205ms step_avg:41.51ms +[2025-09-05 19:45:04] [Rank 0] step:8721/10000 train_time:361943ms step_avg:41.50ms +[2025-09-05 19:45:05] [Rank 0] step:8741/10000 train_time:362682ms step_avg:41.49ms +[2025-09-05 19:45:05] [Rank 0] step:8761/10000 train_time:363422ms step_avg:41.48ms +[2025-09-05 19:45:06] [Rank 0] step:8781/10000 train_time:364162ms step_avg:41.47ms +[2025-09-05 19:45:07] [Rank 0] step:8801/10000 train_time:364903ms step_avg:41.46ms +[2025-09-05 19:45:08] [Rank 0] step:8821/10000 train_time:365644ms step_avg:41.45ms +[2025-09-05 19:45:09] [Rank 0] step:8841/10000 train_time:366991ms step_avg:41.51ms +[2025-09-05 19:45:10] [Rank 0] step:8861/10000 train_time:367730ms step_avg:41.50ms +[2025-09-05 19:45:11] [Rank 0] step:8881/10000 train_time:368474ms step_avg:41.49ms +[2025-09-05 19:45:11] [Rank 0] step:8901/10000 train_time:369214ms step_avg:41.48ms +[2025-09-05 19:45:12] [Rank 0] step:8921/10000 train_time:369954ms step_avg:41.47ms +[2025-09-05 19:45:13] [Rank 0] step:8941/10000 train_time:370694ms step_avg:41.46ms +[2025-09-05 19:45:14] [Rank 0] step:8961/10000 train_time:371433ms step_avg:41.45ms +[2025-09-05 19:45:14] [Rank 0] step:8981/10000 train_time:372173ms step_avg:41.44ms +[2025-09-05 19:45:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:45:15] [Rank 0] PRINT: step:9000/10000 train_loss:1.6500 val_loss:1.6388 train_time:372993ms step_avg:41.44ms +[2025-09-05 19:45:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:45:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:46:37] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:46:37] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:46:37] [Rank 0] Total Loss: 4.3688 +[2025-09-05 19:46:37] [Rank 0] Total FTA (Unweighted): 0.4881 +[2025-09-05 19:46:37] [Rank 0] Total FTA (Weighted): 0.4881
+[2025-09-05 19:46:37] [Rank 0] Group 0 Loss: 3.5185 +[2025-09-05 19:46:37] [Rank 0] Group 1 Loss: 3.3500 +[2025-09-05 19:46:37] [Rank 0] Group 2 Loss: 3.3886 +[2025-09-05 19:46:37] [Rank 0] Group 3 Loss: 3.6514 +[2025-09-05 19:46:37] [Rank 0] Group 4 Loss: 3.8204 +[2025-09-05 19:46:37] [Rank 0] Group 5 Loss: 4.0122 +[2025-09-05 19:46:37] [Rank 0] Group 6 Loss: 4.2657 +[2025-09-05 19:46:37] [Rank 0] Group 7 Loss: 4.4741 +[2025-09-05 19:46:37] [Rank 0] Group 8 Loss: 4.7222 +[2025-09-05 19:46:37] [Rank 0] Group 9 Loss: 4.8365 +[2025-09-05 19:46:37] [Rank 0] Group 10 Loss: 4.9830 +[2025-09-05 19:46:37] [Rank 0] Group 11 Loss: 4.9784 +[2025-09-05 19:46:37] [Rank 0] Group 12 Loss: 4.9352 +[2025-09-05 19:46:37] [Rank 0] Group 13 Loss: 5.0130 +[2025-09-05 19:46:37] [Rank 0] Group 14 Loss: 4.9936 +[2025-09-05 19:46:37] [Rank 0] Group 15 Loss: 4.9584
+[2025-09-05 19:46:37] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:46:37] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:46:37] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:46:37] [Rank 0] Group 3 FTA: 0.8600 +[2025-09-05 19:46:37] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 19:46:37] [Rank 0] Group 5 FTA: 0.5000 +[2025-09-05 19:46:37] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 19:46:37] [Rank 0] Group 7 FTA: 0.3700 +[2025-09-05 19:46:37] [Rank 0] Group 8 FTA: 0.3800 +[2025-09-05 19:46:37] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 19:46:37] [Rank 0] Group 10 FTA: 0.3800 +[2025-09-05 19:46:37] [Rank 0] Group 11 FTA: 0.2900 +[2025-09-05 19:46:37] [Rank 0] Group 12 FTA: 0.3100 +[2025-09-05 19:46:37] [Rank 0] Group 13 FTA: 0.2800 +[2025-09-05 19:46:37] [Rank 0] Group 14 FTA: 0.1600 +[2025-09-05 19:46:37] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 19:46:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png +[2025-09-05 19:46:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png +[2025-09-05 19:46:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png +[2025-09-05 19:46:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:46:39] [Rank 0] step:9001/10000 train_time:373002ms step_avg:41.44ms +[2025-09-05 19:46:40] [Rank 0] step:9021/10000 train_time:373673ms step_avg:41.42ms +[2025-09-05 19:46:40] [Rank 0] step:9041/10000 train_time:374412ms step_avg:41.41ms +[2025-09-05 19:46:41] [Rank 0] step:9061/10000 train_time:375151ms step_avg:41.40ms +[2025-09-05 19:46:42] [Rank 0] step:9081/10000 train_time:375891ms step_avg:41.39ms +[2025-09-05 19:46:43] [Rank 0] step:9101/10000 train_time:376631ms step_avg:41.38ms +[2025-09-05 19:46:43] [Rank 0] step:9121/10000 train_time:377371ms step_avg:41.37ms
+[2025-09-05 19:46:44] [Rank 0] step:9141/10000 train_time:378111ms step_avg:41.36ms +[2025-09-05 19:46:45] [Rank 0] step:9161/10000 train_time:378852ms step_avg:41.35ms +[2025-09-05 19:46:45] [Rank 0] step:9181/10000 train_time:379591ms step_avg:41.35ms +[2025-09-05 19:46:46] [Rank 0] step:9201/10000 train_time:380331ms step_avg:41.34ms +[2025-09-05 19:46:47] [Rank 0] step:9221/10000 train_time:381071ms step_avg:41.33ms +[2025-09-05 19:46:48] [Rank 0] step:9241/10000 train_time:381811ms step_avg:41.32ms +[2025-09-05 19:46:48] [Rank 0] step:9261/10000 train_time:382550ms step_avg:41.31ms +[2025-09-05 19:46:49] [Rank 0] step:9281/10000 train_time:383290ms step_avg:41.30ms +[2025-09-05 19:46:50] [Rank 0] step:9301/10000 train_time:384031ms step_avg:41.29ms +[2025-09-05 19:46:51] [Rank 0] step:9321/10000 train_time:384771ms step_avg:41.28ms +[2025-09-05 19:46:51] [Rank 0] step:9341/10000 train_time:385510ms step_avg:41.27ms +[2025-09-05 19:46:52] [Rank 0] step:9361/10000 train_time:386249ms step_avg:41.26ms +[2025-09-05 19:46:53] [Rank 0] step:9381/10000 train_time:386990ms step_avg:41.25ms +[2025-09-05 19:46:54] [Rank 0] step:9401/10000 train_time:387730ms step_avg:41.24ms +[2025-09-05 19:46:54] [Rank 0] step:9421/10000 train_time:388470ms step_avg:41.23ms +[2025-09-05 19:46:55] [Rank 0] step:9441/10000 train_time:389211ms step_avg:41.23ms +[2025-09-05 19:46:56] [Rank 0] step:9461/10000 train_time:389951ms step_avg:41.22ms +[2025-09-05 19:46:57] [Rank 0] step:9481/10000 train_time:390691ms step_avg:41.21ms +[2025-09-05 19:46:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
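As a sanity check on these timings: step_avg is the cumulative train_time divided by the step count, so it still amortizes the slower early iterations and sits above the marginal cost of a step this late in the run. Pure arithmetic on two adjacent entries above:

t_9441, t_9461 = 389211, 389951   # train_time in ms at steps 9441 and 9461
print((t_9461 - t_9441) / 20)     # 37.0 ms per step, vs the ~41.2 ms cumulative step_avg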
+[2025-09-05 19:46:58] [Rank 0] PRINT: step:9500/10000 train_loss:1.6431 val_loss:1.6328 train_time:391624ms step_avg:41.22ms +[2025-09-05 19:46:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:46:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:48:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:48:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:48:19] [Rank 0] Total Loss: 4.3372 +[2025-09-05 19:48:19] [Rank 0] Total FTA (Unweighted): 0.4962 +[2025-09-05 19:48:19] [Rank 0] Total FTA (Weighted): 0.4963
+[2025-09-05 19:48:19] [Rank 0] Group 0 Loss: 3.4405 +[2025-09-05 19:48:19] [Rank 0] Group 1 Loss: 3.3311 +[2025-09-05 19:48:19] [Rank 0] Group 2 Loss: 3.3224 +[2025-09-05 19:48:19] [Rank 0] Group 3 Loss: 3.6172 +[2025-09-05 19:48:19] [Rank 0] Group 4 Loss: 3.7964 +[2025-09-05 19:48:19] [Rank 0] Group 5 Loss: 3.9770 +[2025-09-05 19:48:19] [Rank 0] Group 6 Loss: 4.2484 +[2025-09-05 19:48:19] [Rank 0] Group 7 Loss: 4.4464 +[2025-09-05 19:48:19] [Rank 0] Group 8 Loss: 4.6870 +[2025-09-05 19:48:19] [Rank 0] Group 9 Loss: 4.8108 +[2025-09-05 19:48:19] [Rank 0] Group 10 Loss: 4.9605 +[2025-09-05 19:48:19] [Rank 0] Group 11 Loss: 4.9466 +[2025-09-05 19:48:19] [Rank 0] Group 12 Loss: 4.9152 +[2025-09-05 19:48:19] [Rank 0] Group 13 Loss: 4.9887 +[2025-09-05 19:48:19] [Rank 0] Group 14 Loss: 4.9689 +[2025-09-05 19:48:19] [Rank 0] Group 15 Loss: 4.9386
+[2025-09-05 19:48:19] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:48:19] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:48:19] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:48:19] [Rank 0] Group 3 FTA: 0.9200 +[2025-09-05 19:48:19] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 19:48:19] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 19:48:19] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 19:48:19] [Rank 0] Group 7 FTA: 0.3600 +[2025-09-05 19:48:19] [Rank 0] Group 8 FTA: 0.3800 +[2025-09-05 19:48:19] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 19:48:19] [Rank 0] Group 10 FTA: 0.3600 +[2025-09-05 19:48:19] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 19:48:19] [Rank 0] Group 12 FTA: 0.3300 +[2025-09-05 19:48:19] [Rank 0] Group 13 FTA: 0.3100 +[2025-09-05 19:48:19] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 19:48:19] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 19:48:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png +[2025-09-05 19:48:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png +[2025-09-05 19:48:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png +[2025-09-05 19:48:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:48:20] [Rank 0] step:9501/10000 train_time:391633ms step_avg:41.22ms +[2025-09-05 19:48:21] [Rank 0] step:9521/10000 train_time:392308ms step_avg:41.20ms +[2025-09-05 19:48:22] [Rank 0] step:9541/10000 train_time:393048ms step_avg:41.20ms +[2025-09-05 19:48:23] [Rank 0] step:9561/10000 train_time:393787ms step_avg:41.19ms +[2025-09-05 19:48:23] [Rank 0] step:9581/10000 train_time:394527ms step_avg:41.18ms +[2025-09-05 19:48:24] [Rank 0] step:9601/10000 train_time:395267ms step_avg:41.17ms +[2025-09-05 19:48:25] [Rank 0] step:9621/10000 train_time:396007ms step_avg:41.16ms +[2025-09-05 19:48:26] [Rank 0] step:9641/10000 train_time:396748ms step_avg:41.15ms +[2025-09-05 19:48:27] [Rank 0] step:9661/10000 train_time:397764ms step_avg:41.17ms +[2025-09-05 19:48:27] [Rank 0] step:9681/10000 train_time:398503ms step_avg:41.16ms +[2025-09-05 19:48:28] [Rank 0] step:9701/10000 train_time:399244ms step_avg:41.15ms +[2025-09-05 19:48:29] [Rank 0] step:9721/10000 train_time:399984ms step_avg:41.15ms +[2025-09-05 19:48:29] [Rank 0] step:9741/10000 train_time:400723ms step_avg:41.14ms +[2025-09-05 19:48:30] [Rank 0] step:9761/10000 train_time:401463ms step_avg:41.13ms +[2025-09-05 19:48:31] [Rank 0] step:9781/10000 train_time:402203ms step_avg:41.12ms +[2025-09-05 19:48:32] [Rank 0] step:9801/10000 train_time:402943ms step_avg:41.11ms +[2025-09-05 19:48:32] [Rank 0] step:9821/10000 train_time:403682ms step_avg:41.10ms +[2025-09-05 19:48:33] [Rank 0] step:9841/10000 train_time:404422ms step_avg:41.10ms +[2025-09-05 19:48:34] [Rank 0] step:9861/10000 train_time:405162ms step_avg:41.09ms +[2025-09-05 19:48:35] [Rank 0] step:9881/10000 train_time:405903ms step_avg:41.08ms +[2025-09-05 19:48:35] [Rank 0] step:9901/10000 train_time:406643ms step_avg:41.07ms +[2025-09-05 19:48:36] [Rank 0] step:9921/10000 train_time:407383ms step_avg:41.06ms +[2025-09-05 19:48:37] [Rank 0] step:9941/10000 train_time:408123ms step_avg:41.05ms +[2025-09-05 19:48:38] [Rank 0] step:9961/10000 train_time:408863ms step_avg:41.05ms +[2025-09-05 19:48:38] [Rank 0] step:9981/10000 train_time:409603ms step_avg:41.04ms +[2025-09-05 19:48:39] [Rank 0] step:10000/10000 train_time:410307ms step_avg:41.03ms +[2025-09-05 19:48:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:48:40] [Rank 0] PRINT: step:10000/10000 train_loss:1.6378 val_loss:1.6272 train_time:410431ms step_avg:41.04ms +[2025-09-05 19:48:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 19:48:40] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 19:50:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 19:50:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 19:50:01] [Rank 0] Total Loss: 4.3556 +[2025-09-05 19:50:01] [Rank 0] Total FTA (Unweighted): 0.5038 +[2025-09-05 19:50:01] [Rank 0] Total FTA (Weighted): 0.5038
+[2025-09-05 19:50:01] [Rank 0] Group 0 Loss: 3.5009 +[2025-09-05 19:50:01] [Rank 0] Group 1 Loss: 3.3469 +[2025-09-05 19:50:01] [Rank 0] Group 2 Loss: 3.3632 +[2025-09-05 19:50:01] [Rank 0] Group 3 Loss: 3.6277 +[2025-09-05 19:50:01] [Rank 0] Group 4 Loss: 3.8095 +[2025-09-05 19:50:01] [Rank 0] Group 5 Loss: 3.9887 +[2025-09-05 19:50:01] [Rank 0] Group 6 Loss: 4.2532 +[2025-09-05 19:50:01] [Rank 0] Group 7 Loss: 4.4513 +[2025-09-05 19:50:01] [Rank 0] Group 8 Loss: 4.7120 +[2025-09-05 19:50:01] [Rank 0] Group 9 Loss: 4.8349 +[2025-09-05 19:50:01] [Rank 0] Group 10 Loss: 4.9796 +[2025-09-05 19:50:01] [Rank 0] Group 11 Loss: 4.9569 +[2025-09-05 19:50:01] [Rank 0] Group 12 Loss: 4.9295 +[2025-09-05 19:50:01] [Rank 0] Group 13 Loss: 5.0092 +[2025-09-05 19:50:01] [Rank 0] Group 14 Loss: 4.9811 +[2025-09-05 19:50:01] [Rank 0] Group 15 Loss: 4.9450
+[2025-09-05 19:50:01] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 19:50:01] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 19:50:01] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 19:50:01] [Rank 0] Group 3 FTA: 0.9200 +[2025-09-05 19:50:01] [Rank 0] Group 4 FTA: 0.5100 +[2025-09-05 19:50:01] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 19:50:01] [Rank 0] Group 6 FTA: 0.4100 +[2025-09-05 19:50:01] [Rank 0] Group 7 FTA: 0.3800 +[2025-09-05 19:50:01] [Rank 0] Group 8 FTA: 0.3800 +[2025-09-05 19:50:01] [Rank 0] Group 9 FTA: 0.2800 +[2025-09-05 19:50:01] [Rank 0] Group 10 FTA: 0.3800 +[2025-09-05 19:50:01] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 19:50:01] [Rank 0] Group 12 FTA: 0.3300 +[2025-09-05 19:50:01] [Rank 0] Group 13 FTA: 0.2900 +[2025-09-05 19:50:01] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 19:50:01] [Rank 0] Group 15 FTA: 0.1500
+[2025-09-05 19:50:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_loss_curves.png +[2025-09-05 19:50:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/per_class_acc_curves.png +[2025-09-05 19:50:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_loss_curve.png +[2025-09-05 19:50:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_45/total_acc_curve.png
+[2025-09-05 19:50:02] [Rank 0] step:10001/10000 train_time:410440ms step_avg:41.04ms +[2025-09-05 19:50:02] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 19:50:02 2025 --- +[2025-09-05 19:50:02] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1608ba3641d27bea7c05771d22a7fc49d72feb41 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 
46, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.2, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "56264fe6-00d6-459c-9593-18b7fd965a37", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..14f44ce64b515a354b598d43e1e923e352e9a047 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48499401a2953cc8e989985f5947270b2b5e4697511e3153686af0a310663747 +size 403212 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..bcc51079a6637444c41217e7ace94df9b428ae1b --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9509d492f6b4c591366e2d33cf56e05cc99f11ff58f1004d5ac3eb590ca360fe +size 436221 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..6e39cecd85b7e266ecf113b5b0e4528e406e1c61 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56ffa5e28f90fb8fd19048761c07cfc33a4f4176efc1af2f123d9d94cd3ddb1e +size 95702 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..2552225d67386bd0f27a59f8cb23890f1afd3a5d --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20a454a0229be04a32b0dd9dbdc9050c81be8e8a6e252b2f071f570be7d16ce7 +size 114963 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/training_log_56264fe6-00d6-459c-9593-18b7fd965a37.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/training_log_56264fe6-00d6-459c-9593-18b7fd965a37.txt new file mode 100644 index 0000000000000000000000000000000000000000..1d2f0ca02cf7caa95e848032b5e3102901c9013c --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/training_log_56264fe6-00d6-459c-9593-18b7fd965a37.txt @@ -0,0 +1,5614 @@ +[2025-09-05 19:50:26] [Rank 0] PRINT: --- Script Start: Fri Sep 5 19:50:26 2025 --- +[2025-09-05 19:50:26] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=46, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 19:50:26] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.2, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 19:50:26] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 19:50:26] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 19:50:26] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-05 19:50:26] [Rank 0] PRINT: Using fixed seed: 46 +[2025-09-05 19:50:26] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46 +[2025-09-05 19:50:26] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46 +[2025-09-05 19:50:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+    assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle over the shards so the stream never runs dry during multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
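+                         "9: SGD+Momentum on ALL parameters (no Muon/Adam; lr from --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QKV Attn); "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QKV Attn, W_1 MLP); "
+                         "14: Muon(O Attn)/Adam(QKV Attn, MLP); "
+                         "15: Muon(V Attn)/Adam(QK+O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP). "
+                         "(11 and 12 are unused; unknown modes raise ValueError below.)"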
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
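+# Worked example of the power-law class layout built by
+# generate_powerlaw_selection_counts below (m = 3, illustrative numbers):
+#   group 0: 1 class  x 8 samples
+#   group 1: 1 class  x 4 samples
+#   group 2: 2 classes x 2 samples
+#   group 3: 4 classes x 1 sample
+# i.e. m+1 groups; group g holds 2**(g-1) classes (1 for g=0) with 2**(m-g)
+# samples each, so higher-numbered groups are the rare "tail" classes.
+# ... 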
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
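+    # Worked example of the FTA parsing in the loop above (illustrative text):
+    #   text  = "Where does Alice work? Answer: Acme Corp"
+    #   regex -> prompt = "Where does Alice work?", answer = "Acme Corp"
+    #   a hit is counted iff the argmax of the logits at the last prompt token
+    #   equals the first token id of " Acme Corp".
+    # The aggregation below reports both a sample-weighted and an unweighted
+    # total FTA; e.g. 90/100 correct in group A and 1/10 in group B gives
+    # weighted (90+1)/(100+10) ~ 0.83 but unweighted (0.9+0.1)/2 = 0.50.
+
+    # 4. 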
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
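+    When num_samples is set, a stratified sample is drawn: every class keeps a
+    share proportional to its size (at least one item), preserving the class mix.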
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) #add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
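+    # Optional sanity check (sketch, not active in this run): for the non-SGD
+    # modes, verify that no matrix ended up in both optimizers.
+    #   adam_ids = {id(p) for g in adam_param_groups_config for p in g["params"]}
+    #   muon_ids = set() if optimizer2 is None else {id(p) for p in flat_unique_muon_params}
+    #   assert not (adam_ids & muon_ids), "a parameter is assigned to both Adam and Muon"
+
+    print0(f"PRINT: Optimizers configured. 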
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) #add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; use the CLI lr (a bare muon_lr is only bound in the qkvo branch)
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 19:50:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once (the original had a
+        # second, duplicated write block here, which doubled every log line)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
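+# Usage sketch for the logger above (illustration only): print0("PRINT: hello", console=True)
+# echoes the message (with the leading "PRINT:" tag stripped) to stdout on rank 0 and
+# appends a timestamped line such as "[2025-09-05 19:50:27] [Rank 0] PRINT: hello"
+# to the run's training_log_*.txt; non-master ranks do nothing.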
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
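+# Worked example of the construction above (illustration only, not used in the
+# training path): for m = 3, group 0 holds a single class with 2**3 = 8 samples
+# and each later group g holds 2**(g-1) classes with 2**(3-g) samples apiece,
+# so group 0 contributes 2**m samples and every other group 2**(m-1):
+#   >>> generate_powerlaw_selection_counts(3)
+#   ({0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}, [0, 1, 2, 2, 3, 3, 3, 3])
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # For FTA sample count
+
+    # 3. 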
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
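+    # A toy illustration (hypothetical values, not executed here) of the
+    # flatten + dedup-by-id idiom used for both optimizers' parameter lists:
+    #   p = torch.nn.Parameter(torch.zeros(2, 2))
+    #   nested = [[p, p], p]
+    #   flat = [q for s in nested for q in (s if isinstance(s, list) else [s])]  # -> [p, p, p]
+    # keeping only the first occurrence per id(q) then leaves [p], so a
+    # Parameter that shows up in two groups is never handed to Muon twice.
+
+    print0(f"PRINT: Optimizers configured. 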
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # Use exp_args.muon_lr directly: the muon_lr local is only assigned in the
+                # "qkvo" branch above, so referencing it here raised a NameError in this branch
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
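+# Worked example of the schedule above with this run's settings
+# (num_iterations=10000, cooldown_frac=0.8); illustration only:
+#   step     0 -> x=0.00 -> 1.0            (constant phase: x < 0.2)
+#   step  2000 -> x=0.20 -> w=1.0 -> 1.0   (cooldown begins at full LR)
+#   step  6000 -> x=0.60 -> w=0.5 -> 0.55
+#   step 10000 -> x=1.00 -> w=0.0 -> 0.1   (final multiplier)
+# i.e. flat at 1.0 for the first 20% of training, then linear decay to 0.1;
+# each group's effective lr is group["initial_lr"] * get_lr(step).
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in 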
model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
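+    # The saved fixed_eval_indices.json maps each group id (as a string) to the
+    # jsonl line numbers sampled for it, at most PER_GROUP_K (default 100) per
+    # group, e.g. {"0": [1390189, 1220977, ...], "1": [1238956, 182074, ...], ...};
+    # reusing these indices keeps every later evaluation on exactly the same
+    # questions, so the per-group curves don't jitter from resampling.
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. 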
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 19:50:27] [Rank 0] PRINT: Constructing model... +[2025-09-05 19:50:27] [Rank 0] PRINT: Constructing model... +[2025-09-05 19:50:28] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-05 19:50:28] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-05 19:50:28] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-05 19:50:28] [Rank 0] PRINT: Model constructed and broadcasted. 
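
The training loop above applies a multiplicative learning-rate schedule (each param group keeps its base rate in `initial_lr` and is rescaled by `get_lr(step)` every step) and warms SGD momentum from 0.85 to 0.95 over the first 300 steps. A minimal standalone sketch of that pattern follows; `get_lr` here is a stand-in (constant phase followed by a linear cooldown) and the model/hyperparameters are illustrative assumptions, not this script's actual implementation.

import torch

def get_lr(step: int, total_steps: int = 10000, cooldown_frac: float = 0.8) -> float:
    # Stand-in schedule: hold the base LR, then decay linearly to 0 over the
    # final `cooldown_frac` of training. Returns a multiplier, not an LR.
    x = step / total_steps
    if x < 1 - cooldown_frac:
        return 1.0
    return max(0.0, (1 - x) / cooldown_frac)

model = torch.nn.Linear(8, 8)  # toy model for illustration
opt = torch.optim.SGD(model.parameters(), lr=0.2, momentum=0.85)
for group in opt.param_groups:
    group["initial_lr"] = group["lr"]  # stash the base LR so scaling never compounds

for step in range(1000):
    # ... forward/backward would go here ...
    scale = get_lr(step)
    for group in opt.param_groups:
        group["lr"] = group["initial_lr"] * scale
        # Momentum warmup: ramp 0.85 -> 0.95 over the first 300 steps.
        frac = min(step / 300, 1.0)
        group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
    opt.step()
    opt.zero_grad(set_to_none=True)

Rescaling from a stored `initial_lr` (rather than multiplying `group["lr"]` in place) keeps the schedule stateless: the LR at any step depends only on `step`, which also makes resuming from a checkpoint straightforward.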
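For reference, here is a sketch of how the per-group indices written to fixed_eval_indices.json can be consumed at eval time. The internals of `run_detailed_evaluation` are not shown in this log, so the loader below (including its name) is an assumption that simply mirrors `_is_valid_qa_text_for_fta`: it splits each record into a question and a gold answer for first-token-accuracy (FTA) scoring.

import json
import re

def load_fixed_eval_samples(jsonl_path: str, index_path: str):
    # Hypothetical helper: fixed_eval_indices.json maps group id (str) -> list
    # of line offsets into the QA .jsonl file, as built by build_fixed_eval_indices.
    with open(index_path, "r") as f:
        fixed_idx = json.load(f)
    wanted = {i for idxs in fixed_idx.values() for i in idxs}
    records = {}
    with open(jsonl_path, "r", encoding="utf-8") as f:
        for i, line in enumerate(f):
            if i in wanted:
                records[i] = json.loads(line)
    # Same "question? Answer: gold" split that the index builder validated against.
    qa_re = re.compile(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', re.IGNORECASE)
    samples = {gid: [] for gid in fixed_idx}
    for gid, idxs in fixed_idx.items():
        for i in idxs:
            m = qa_re.search(records[i].get("text", ""))
            if m:  # every indexed line should parse, since the builder filtered on this
                samples[gid].append((m.group(1), m.group(2)))
    return samples

With per_group_k = 100 and 16 groups, this yields the "Fixed-eval set loaded with 1600 samples" figure seen in the log below.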
+[2025-09-05 19:50:27] [Rank 0] PRINT: Constructing model...
+[2025-09-05 19:50:28] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 19:50:28] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 19:50:28] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 19:50:32] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 19:50:32] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 19:50:32] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 19:50:32] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 19:50:32] [Rank 0] PRINT: Model returns:
+[2025-09-05 19:50:32] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 19:50:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 19:50:32] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.2).
+[2025-09-05 19:50:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 19:50:32] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 19:50:37] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 19:50:37] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 19:51:15] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 19:51:16] [Rank 0] PRINT: Starting training...
+[2025-09-05 19:51:22] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/fixed_eval_indices.json
+[2025-09-05 19:51:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:51:26] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 19:52:01] [Rank 0] step:21/10000 train_time:34894ms step_avg:1661.60ms
+[2025-09-05 19:52:02] [Rank 0] step:41/10000 train_time:35623ms step_avg:868.85ms
+[2025-09-05 19:52:02] [Rank 0] step:61/10000 train_time:36350ms step_avg:595.91ms
+[2025-09-05 19:52:03] [Rank 0] step:81/10000 train_time:37078ms step_avg:457.75ms
+[2025-09-05 19:52:04] [Rank 0] step:101/10000 train_time:37805ms step_avg:374.31ms
+[2025-09-05 19:52:05] [Rank 0] step:121/10000 train_time:38533ms step_avg:318.45ms
+[2025-09-05 19:52:05] [Rank 0] step:141/10000 train_time:39261ms step_avg:278.45ms
+[2025-09-05 19:52:06] [Rank 0] step:161/10000 train_time:39988ms step_avg:248.37ms
+[2025-09-05 19:52:07] [Rank 0] step:181/10000 train_time:40716ms step_avg:224.95ms
+[2025-09-05 19:52:07] [Rank 0] step:201/10000 train_time:41444ms step_avg:206.19ms
+[2025-09-05 19:52:08] [Rank 0] step:221/10000 train_time:42171ms step_avg:190.82ms
+[2025-09-05 19:52:09] [Rank 0] step:241/10000 train_time:42897ms step_avg:178.00ms
+[2025-09-05 19:52:10] [Rank 0] step:261/10000 train_time:43624ms step_avg:167.14ms
+[2025-09-05 19:52:10] [Rank 0] step:281/10000 train_time:44351ms step_avg:157.83ms
+[2025-09-05 19:52:11] [Rank 0] step:301/10000 train_time:45079ms step_avg:149.76ms
+[2025-09-05 19:52:12] [Rank 0] step:321/10000 train_time:45805ms step_avg:142.70ms
+[2025-09-05 19:52:13] [Rank 0] step:341/10000 train_time:46532ms step_avg:136.46ms
+[2025-09-05 19:52:13] [Rank 0] step:361/10000 train_time:47384ms step_avg:131.26ms
+[2025-09-05 19:52:14] [Rank 0] step:381/10000 train_time:48110ms step_avg:126.27ms
+[2025-09-05 19:52:15] [Rank 0] step:401/10000 train_time:48836ms step_avg:121.79ms
+[2025-09-05 19:52:16] [Rank 0] step:421/10000 train_time:49687ms step_avg:118.02ms
+[2025-09-05 19:52:16] [Rank 0] step:441/10000 train_time:50417ms step_avg:114.32ms
+[2025-09-05 19:52:17] [Rank 0] step:461/10000 train_time:51144ms step_avg:110.94ms
+[2025-09-05 19:52:18] [Rank 0] step:481/10000 train_time:51870ms step_avg:107.84ms
+[2025-09-05 19:52:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:52:19] [Rank 0] PRINT: step:500/10000 train_loss:4.6473 val_loss:3.2422 train_time:52676ms step_avg:105.35ms
+[2025-09-05 19:52:19] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:52:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:53:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:53:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:53:41] [Rank 0] Total Loss: 5.4364
+[2025-09-05 19:53:41] [Rank 0] Total FTA (Unweighted): 0.1206
+[2025-09-05 19:53:41] [Rank 0] Total FTA (Weighted): 0.1206
+[2025-09-05 19:53:41] [Rank 0] Group 0 Loss: 3.3630
+[2025-09-05 19:53:41] [Rank 0] Group 1 Loss: 3.3210
+[2025-09-05 19:53:41] [Rank 0] Group 2 Loss: 3.5895
+[2025-09-05 19:53:41] [Rank 0] Group 3 Loss: 4.3061
+[2025-09-05 19:53:41] [Rank 0] Group 4 Loss: 5.1833
+[2025-09-05 19:53:41] [Rank 0] Group 5 Loss: 5.5926
+[2025-09-05 19:53:41] [Rank 0] Group 6 Loss: 5.8634
+[2025-09-05 19:53:41] [Rank 0] Group 7 Loss: 5.9059
+[2025-09-05 19:53:41] [Rank 0] Group 8 Loss: 6.1226
+[2025-09-05 19:53:41] [Rank 0] Group 9 Loss: 6.2910
+[2025-09-05 19:53:41] [Rank 0] Group 10 Loss: 6.2702
+[2025-09-05 19:53:41] [Rank 0] Group 11 Loss: 6.3469
+[2025-09-05 19:53:41] [Rank 0] Group 12 Loss: 6.1764
+[2025-09-05 19:53:41] [Rank 0] Group 13 Loss: 6.1885
+[2025-09-05 19:53:41] [Rank 0] Group 14 Loss: 6.2857
+[2025-09-05 19:53:41] [Rank 0] Group 15 Loss: 6.1768
+[2025-09-05 19:53:41] [Rank 0] Group 0 FTA: 0.2500
+[2025-09-05 19:53:41] [Rank 0] Group 1 FTA: 0.2000
+[2025-09-05 19:53:41] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 19:53:41] [Rank 0] Group 3 FTA: 0.1100
+[2025-09-05 19:53:41] [Rank 0] Group 4 FTA: 0.0900
+[2025-09-05 19:53:41] [Rank 0] Group 5 FTA: 0.1600
+[2025-09-05 19:53:41] [Rank 0] Group 6 FTA: 0.0600
+[2025-09-05 19:53:41] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 19:53:41] [Rank 0] Group 8 FTA: 0.1100
+[2025-09-05 19:53:41] [Rank 0] Group 9 FTA: 0.0800
+[2025-09-05 19:53:41] [Rank 0] Group 10 FTA: 0.0700
+[2025-09-05 19:53:41] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 19:53:41] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 19:53:41] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 19:53:41] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 19:53:41] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 19:53:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 19:53:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 19:53:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 19:53:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 19:53:43] [Rank 0] step:501/10000 train_time:52688ms step_avg:105.17ms
+[2025-09-05 19:53:44] [Rank 0] step:521/10000 train_time:53354ms step_avg:102.41ms
+[2025-09-05 19:53:44] [Rank 0] step:541/10000 train_time:54082ms step_avg:99.97ms
+[2025-09-05 19:53:45] [Rank 0] step:561/10000 train_time:54809ms step_avg:97.70ms
+[2025-09-05 19:53:46] [Rank 0] step:581/10000 train_time:55536ms step_avg:95.59ms
+[2025-09-05 19:53:46] [Rank 0] step:601/10000 train_time:56263ms step_avg:93.62ms
+[2025-09-05 19:53:47] [Rank 0] step:621/10000 train_time:56990ms step_avg:91.77ms
+[2025-09-05 19:53:48] [Rank 0] step:641/10000 train_time:57718ms step_avg:90.04ms
+[2025-09-05 19:53:49] [Rank 0] step:661/10000 train_time:58445ms step_avg:88.42ms
+[2025-09-05 19:53:49] [Rank 0] step:681/10000 train_time:59172ms step_avg:86.89ms
+[2025-09-05 19:53:50] [Rank 0] step:701/10000 train_time:59899ms step_avg:85.45ms
+[2025-09-05 19:53:51] [Rank 0] step:721/10000 train_time:60626ms step_avg:84.09ms
+[2025-09-05 19:53:52] [Rank 0] step:741/10000 train_time:61354ms step_avg:82.80ms
+[2025-09-05 19:53:52] [Rank 0] step:761/10000 train_time:62086ms step_avg:81.58ms
+[2025-09-05 19:53:53] [Rank 0] step:781/10000 train_time:62818ms step_avg:80.43ms
+[2025-09-05 19:53:54] [Rank 0] step:801/10000 train_time:63552ms step_avg:79.34ms
+[2025-09-05 19:53:55] [Rank 0] step:821/10000 train_time:64890ms step_avg:79.04ms
+[2025-09-05 19:53:56] [Rank 0] step:841/10000 train_time:65622ms step_avg:78.03ms
+[2025-09-05 19:53:57] [Rank 0] step:861/10000 train_time:66355ms step_avg:77.07ms
+[2025-09-05 19:53:57] [Rank 0] step:881/10000 train_time:67087ms step_avg:76.15ms
+[2025-09-05 19:53:58] [Rank 0] step:901/10000 train_time:67819ms step_avg:75.27ms
+[2025-09-05 19:53:59] [Rank 0] step:921/10000 train_time:68551ms step_avg:74.43ms
+[2025-09-05 19:54:00] [Rank 0] step:941/10000 train_time:69283ms step_avg:73.63ms
+[2025-09-05 19:54:00] [Rank 0] step:961/10000 train_time:70016ms step_avg:72.86ms
+[2025-09-05 19:54:01] [Rank 0] step:981/10000 train_time:70751ms step_avg:72.12ms
+[2025-09-05 19:54:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:54:02] [Rank 0] PRINT: step:1000/10000 train_loss:2.8773 val_loss:2.5927 train_time:71563ms step_avg:71.56ms
+[2025-09-05 19:54:02] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:54:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:55:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:55:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:55:24] [Rank 0] Total Loss: 5.0444
+[2025-09-05 19:55:24] [Rank 0] Total FTA (Unweighted): 0.2144
+[2025-09-05 19:55:24] [Rank 0] Total FTA (Weighted): 0.2144
+[2025-09-05 19:55:24] [Rank 0] Group 0 Loss: 3.3533
+[2025-09-05 19:55:24] [Rank 0] Group 1 Loss: 3.2698
+[2025-09-05 19:55:24] [Rank 0] Group 2 Loss: 3.3221
+[2025-09-05 19:55:24] [Rank 0] Group 3 Loss: 3.9095
+[2025-09-05 19:55:24] [Rank 0] Group 4 Loss: 4.4076
+[2025-09-05 19:55:24] [Rank 0] Group 5 Loss: 4.9193
+[2025-09-05 19:55:24] [Rank 0] Group 6 Loss: 5.3127
+[2025-09-05 19:55:24] [Rank 0] Group 7 Loss: 5.4209
+[2025-09-05 19:55:24] [Rank 0] Group 8 Loss: 5.7105
+[2025-09-05 19:55:24] [Rank 0] Group 9 Loss: 5.8697
+[2025-09-05 19:55:24] [Rank 0] Group 10 Loss: 5.9173
+[2025-09-05 19:55:24] [Rank 0] Group 11 Loss: 5.9605
+[2025-09-05 19:55:24] [Rank 0] Group 12 Loss: 5.8214
+[2025-09-05 19:55:24] [Rank 0] Group 13 Loss: 5.8265
+[2025-09-05 19:55:24] [Rank 0] Group 14 Loss: 5.8854
+[2025-09-05 19:55:24] [Rank 0] Group 15 Loss: 5.8045
+[2025-09-05 19:55:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:55:24] [Rank 0] Group 1 FTA: 0.6500
+[2025-09-05 19:55:24] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 19:55:24] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 19:55:24] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-05 19:55:24] [Rank 0] Group 5 FTA: 0.1800
+[2025-09-05 19:55:24] [Rank 0] Group 6 FTA: 0.0900
+[2025-09-05 19:55:24] [Rank 0] Group 7 FTA: 0.0900
+[2025-09-05 19:55:24] [Rank 0] Group 8 FTA: 0.1800
+[2025-09-05 19:55:24] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 19:55:24] [Rank 0] Group 10 FTA: 0.1000
+[2025-09-05 19:55:24] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 19:55:24] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 19:55:24] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 19:55:24] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 19:55:24] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:55:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 19:55:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 19:55:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 19:55:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 19:55:26] [Rank 0] step:1001/10000 train_time:71574ms step_avg:71.50ms
+[2025-09-05 19:55:26] [Rank 0] step:1021/10000 train_time:72235ms step_avg:70.75ms
+[2025-09-05 19:55:27] [Rank 0] step:1041/10000 train_time:72968ms step_avg:70.09ms
+[2025-09-05 19:55:28] [Rank 0] step:1061/10000 train_time:73701ms step_avg:69.46ms
+[2025-09-05 19:55:29] [Rank 0] step:1081/10000 train_time:74433ms step_avg:68.86ms
+[2025-09-05 19:55:29] [Rank 0] step:1101/10000 train_time:75166ms step_avg:68.27ms
+[2025-09-05 19:55:30] [Rank 0] step:1121/10000 train_time:75898ms step_avg:67.71ms
+[2025-09-05 19:55:31] [Rank 0] step:1141/10000 train_time:76630ms step_avg:67.16ms
+[2025-09-05 19:55:32] [Rank 0] step:1161/10000 train_time:77362ms step_avg:66.63ms
+[2025-09-05 19:55:32] [Rank 0] step:1181/10000 train_time:78094ms step_avg:66.13ms
+[2025-09-05 19:55:33] [Rank 0] step:1201/10000 train_time:78827ms step_avg:65.63ms
+[2025-09-05 19:55:34] [Rank 0] step:1221/10000 train_time:79558ms step_avg:65.16ms
+[2025-09-05 19:55:35] [Rank 0] step:1241/10000 train_time:80292ms step_avg:64.70ms
+[2025-09-05 19:55:35] [Rank 0] step:1261/10000 train_time:81025ms step_avg:64.25ms
+[2025-09-05 19:55:36] [Rank 0] step:1281/10000 train_time:81757ms step_avg:63.82ms
+[2025-09-05 19:55:37] [Rank 0] step:1301/10000 train_time:82490ms step_avg:63.40ms
+[2025-09-05 19:55:37] [Rank 0] step:1321/10000 train_time:83222ms step_avg:63.00ms
+[2025-09-05 19:55:38] [Rank 0] step:1341/10000 train_time:83954ms step_avg:62.61ms
+[2025-09-05 19:55:39] [Rank 0] step:1361/10000 train_time:84687ms step_avg:62.22ms
+[2025-09-05 19:55:40] [Rank 0] step:1381/10000 train_time:85420ms step_avg:61.85ms
+[2025-09-05 19:55:40] [Rank 0] step:1401/10000 train_time:86153ms step_avg:61.49ms
+[2025-09-05 19:55:41] [Rank 0] step:1421/10000 train_time:86885ms step_avg:61.14ms
+[2025-09-05 19:55:42] [Rank 0] step:1441/10000 train_time:87616ms step_avg:60.80ms
+[2025-09-05 19:55:43] [Rank 0] step:1461/10000 train_time:88348ms step_avg:60.47ms
+[2025-09-05 19:55:43] [Rank 0] step:1481/10000 train_time:89081ms step_avg:60.15ms
+[2025-09-05 19:55:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:55:44] [Rank 0] PRINT: step:1500/10000 train_loss:2.4337 val_loss:2.2953 train_time:89893ms step_avg:59.93ms
+[2025-09-05 19:55:45] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:55:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:57:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:57:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:57:06] [Rank 0] Total Loss: 4.8011
+[2025-09-05 19:57:06] [Rank 0] Total FTA (Unweighted): 0.2669
+[2025-09-05 19:57:06] [Rank 0] Total FTA (Weighted): 0.2669
+[2025-09-05 19:57:06] [Rank 0] Group 0 Loss: 3.4156
+[2025-09-05 19:57:06] [Rank 0] Group 1 Loss: 3.3611
+[2025-09-05 19:57:06] [Rank 0] Group 2 Loss: 3.3096
+[2025-09-05 19:57:06] [Rank 0] Group 3 Loss: 3.8257
+[2025-09-05 19:57:06] [Rank 0] Group 4 Loss: 4.0490
+[2025-09-05 19:57:06] [Rank 0] Group 5 Loss: 4.5260
+[2025-09-05 19:57:06] [Rank 0] Group 6 Loss: 4.9235
+[2025-09-05 19:57:06] [Rank 0] Group 7 Loss: 5.0642
+[2025-09-05 19:57:06] [Rank 0] Group 8 Loss: 5.3713
+[2025-09-05 19:57:06] [Rank 0] Group 9 Loss: 5.4919
+[2025-09-05 19:57:06] [Rank 0] Group 10 Loss: 5.6235
+[2025-09-05 19:57:06] [Rank 0] Group 11 Loss: 5.6405
+[2025-09-05 19:57:06] [Rank 0] Group 12 Loss: 5.5343
+[2025-09-05 19:57:06] [Rank 0] Group 13 Loss: 5.5547
+[2025-09-05 19:57:06] [Rank 0] Group 14 Loss: 5.5965
+[2025-09-05 19:57:06] [Rank 0] Group 15 Loss: 5.5298
+[2025-09-05 19:57:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:57:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:57:06] [Rank 0] Group 2 FTA: 0.4000
+[2025-09-05 19:57:06] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 19:57:06] [Rank 0] Group 4 FTA: 0.2200
+[2025-09-05 19:57:06] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 19:57:06] [Rank 0] Group 6 FTA: 0.1600
+[2025-09-05 19:57:06] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 19:57:06] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 19:57:06] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 19:57:06] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-05 19:57:06] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 19:57:06] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 19:57:06] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 19:57:06] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 19:57:06] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 19:57:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 19:57:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 19:57:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 19:57:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 19:57:07] [Rank 0] step:1501/10000 train_time:89903ms step_avg:59.90ms
+[2025-09-05 19:57:08] [Rank 0] step:1521/10000 train_time:90564ms step_avg:59.54ms
+[2025-09-05 19:57:09] [Rank 0] step:1541/10000 train_time:91296ms step_avg:59.24ms
+[2025-09-05 19:57:10] [Rank 0] step:1561/10000 train_time:92028ms step_avg:58.95ms
+[2025-09-05 19:57:10] [Rank 0] step:1581/10000 train_time:92760ms step_avg:58.67ms
+[2025-09-05 19:57:11] [Rank 0] step:1601/10000 train_time:93492ms step_avg:58.40ms
+[2025-09-05 19:57:12] [Rank 0] step:1621/10000 train_time:94225ms step_avg:58.13ms
+[2025-09-05 19:57:13] [Rank 0] step:1641/10000 train_time:95585ms step_avg:58.25ms
+[2025-09-05 19:57:14] [Rank 0] step:1661/10000 train_time:96318ms step_avg:57.99ms
+[2025-09-05 19:57:15] [Rank 0] step:1681/10000 train_time:97051ms step_avg:57.73ms
+[2025-09-05 19:57:15] [Rank 0] step:1701/10000 train_time:97783ms step_avg:57.49ms
+[2025-09-05 19:57:16] [Rank 0] step:1721/10000 train_time:98515ms step_avg:57.24ms
+[2025-09-05 19:57:17] [Rank 0] step:1741/10000 train_time:99247ms step_avg:57.01ms
+[2025-09-05 19:57:18] [Rank 0] step:1761/10000 train_time:99980ms step_avg:56.77ms
+[2025-09-05 19:57:18] [Rank 0] step:1781/10000 train_time:100712ms step_avg:56.55ms
+[2025-09-05 19:57:19] [Rank 0] step:1801/10000 train_time:101444ms step_avg:56.33ms
+[2025-09-05 19:57:20] [Rank 0] step:1821/10000 train_time:102177ms step_avg:56.11ms
+[2025-09-05 19:57:21] [Rank 0] step:1841/10000 train_time:102909ms step_avg:55.90ms
+[2025-09-05 19:57:21] [Rank 0] step:1861/10000 train_time:103641ms step_avg:55.69ms
+[2025-09-05 19:57:22] [Rank 0] step:1881/10000 train_time:104373ms step_avg:55.49ms
+[2025-09-05 19:57:23] [Rank 0] step:1901/10000 train_time:105105ms step_avg:55.29ms
+[2025-09-05 19:57:23] [Rank 0] step:1921/10000 train_time:105839ms step_avg:55.10ms
+[2025-09-05 19:57:24] [Rank 0] step:1941/10000 train_time:106570ms step_avg:54.90ms
+[2025-09-05 19:57:25] [Rank 0] step:1961/10000 train_time:107302ms step_avg:54.72ms
+[2025-09-05 19:57:26] [Rank 0] step:1981/10000 train_time:108034ms step_avg:54.54ms
+[2025-09-05 19:57:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:57:27] [Rank 0] PRINT: step:2000/10000 train_loss:2.1961 val_loss:2.1038 train_time:108846ms step_avg:54.42ms
+[2025-09-05 19:57:27] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:57:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 19:58:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 19:58:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 19:58:49] [Rank 0] Total Loss: 4.4655
+[2025-09-05 19:58:49] [Rank 0] Total FTA (Unweighted): 0.2900
+[2025-09-05 19:58:49] [Rank 0] Total FTA (Weighted): 0.2900
+[2025-09-05 19:58:49] [Rank 0] Group 0 Loss: 3.1751
+[2025-09-05 19:58:49] [Rank 0] Group 1 Loss: 3.1114
+[2025-09-05 19:58:49] [Rank 0] Group 2 Loss: 3.0774
+[2025-09-05 19:58:49] [Rank 0] Group 3 Loss: 3.4609
+[2025-09-05 19:58:49] [Rank 0] Group 4 Loss: 3.7407
+[2025-09-05 19:58:49] [Rank 0] Group 5 Loss: 4.2051
+[2025-09-05 19:58:49] [Rank 0] Group 6 Loss: 4.5061
+[2025-09-05 19:58:49] [Rank 0] Group 7 Loss: 4.7160
+[2025-09-05 19:58:49] [Rank 0] Group 8 Loss: 5.0047
+[2025-09-05 19:58:49] [Rank 0] Group 9 Loss: 5.1369
+[2025-09-05 19:58:49] [Rank 0] Group 10 Loss: 5.2473
+[2025-09-05 19:58:49] [Rank 0] Group 11 Loss: 5.2452
+[2025-09-05 19:58:49] [Rank 0] Group 12 Loss: 5.1642
+[2025-09-05 19:58:49] [Rank 0] Group 13 Loss: 5.2074
+[2025-09-05 19:58:49] [Rank 0] Group 14 Loss: 5.2315
+[2025-09-05 19:58:49] [Rank 0] Group 15 Loss: 5.2175
+[2025-09-05 19:58:49] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 19:58:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 19:58:49] [Rank 0] Group 2 FTA: 0.4800
+[2025-09-05 19:58:49] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 19:58:49] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 19:58:49] [Rank 0] Group 5 FTA: 0.2400
+[2025-09-05 19:58:49] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 19:58:49] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 19:58:49] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 19:58:49] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 19:58:49] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-05 19:58:49] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-05 19:58:49] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 19:58:49] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 19:58:49] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 19:58:49] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 19:58:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 19:58:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 19:58:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 19:58:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 19:58:51] [Rank 0] step:2001/10000 train_time:108856ms step_avg:54.40ms
+[2025-09-05 19:58:52] [Rank 0] step:2021/10000 train_time:109723ms step_avg:54.29ms
+[2025-09-05 19:58:52] [Rank 0] step:2041/10000 train_time:110455ms step_avg:54.12ms
+[2025-09-05 19:58:53] [Rank 0] step:2061/10000 train_time:111187ms step_avg:53.95ms
+[2025-09-05 19:58:54] [Rank 0] step:2081/10000 train_time:111920ms step_avg:53.78ms
train_time:111920ms step_avg:53.78ms +[2025-09-05 19:58:54] [Rank 0] step:2081/10000 train_time:111920ms step_avg:53.78ms +[2025-09-05 19:58:55] [Rank 0] step:2101/10000 train_time:112654ms step_avg:53.62ms +[2025-09-05 19:58:55] [Rank 0] step:2101/10000 train_time:112654ms step_avg:53.62ms +[2025-09-05 19:58:55] [Rank 0] step:2121/10000 train_time:113386ms step_avg:53.46ms +[2025-09-05 19:58:55] [Rank 0] step:2121/10000 train_time:113386ms step_avg:53.46ms +[2025-09-05 19:58:56] [Rank 0] step:2141/10000 train_time:114118ms step_avg:53.30ms +[2025-09-05 19:58:56] [Rank 0] step:2141/10000 train_time:114118ms step_avg:53.30ms +[2025-09-05 19:58:57] [Rank 0] step:2161/10000 train_time:114851ms step_avg:53.15ms +[2025-09-05 19:58:57] [Rank 0] step:2161/10000 train_time:114851ms step_avg:53.15ms +[2025-09-05 19:58:57] [Rank 0] step:2181/10000 train_time:115584ms step_avg:53.00ms +[2025-09-05 19:58:57] [Rank 0] step:2181/10000 train_time:115584ms step_avg:53.00ms +[2025-09-05 19:58:58] [Rank 0] step:2201/10000 train_time:116317ms step_avg:52.85ms +[2025-09-05 19:58:58] [Rank 0] step:2201/10000 train_time:116317ms step_avg:52.85ms +[2025-09-05 19:58:59] [Rank 0] step:2221/10000 train_time:117049ms step_avg:52.70ms +[2025-09-05 19:58:59] [Rank 0] step:2221/10000 train_time:117049ms step_avg:52.70ms +[2025-09-05 19:59:00] [Rank 0] step:2241/10000 train_time:117786ms step_avg:52.56ms +[2025-09-05 19:59:00] [Rank 0] step:2241/10000 train_time:117786ms step_avg:52.56ms +[2025-09-05 19:59:00] [Rank 0] step:2261/10000 train_time:118524ms step_avg:52.42ms +[2025-09-05 19:59:00] [Rank 0] step:2261/10000 train_time:118524ms step_avg:52.42ms +[2025-09-05 19:59:01] [Rank 0] step:2281/10000 train_time:119264ms step_avg:52.29ms +[2025-09-05 19:59:01] [Rank 0] step:2281/10000 train_time:119264ms step_avg:52.29ms +[2025-09-05 19:59:02] [Rank 0] step:2301/10000 train_time:120003ms step_avg:52.15ms +[2025-09-05 19:59:02] [Rank 0] step:2301/10000 train_time:120003ms step_avg:52.15ms +[2025-09-05 19:59:03] [Rank 0] step:2321/10000 train_time:120742ms step_avg:52.02ms +[2025-09-05 19:59:03] [Rank 0] step:2321/10000 train_time:120742ms step_avg:52.02ms +[2025-09-05 19:59:03] [Rank 0] step:2341/10000 train_time:121481ms step_avg:51.89ms +[2025-09-05 19:59:03] [Rank 0] step:2341/10000 train_time:121481ms step_avg:51.89ms +[2025-09-05 19:59:04] [Rank 0] step:2361/10000 train_time:122219ms step_avg:51.77ms +[2025-09-05 19:59:04] [Rank 0] step:2361/10000 train_time:122219ms step_avg:51.77ms +[2025-09-05 19:59:05] [Rank 0] step:2381/10000 train_time:122958ms step_avg:51.64ms +[2025-09-05 19:59:05] [Rank 0] step:2381/10000 train_time:122958ms step_avg:51.64ms +[2025-09-05 19:59:06] [Rank 0] step:2401/10000 train_time:123697ms step_avg:51.52ms +[2025-09-05 19:59:06] [Rank 0] step:2401/10000 train_time:123697ms step_avg:51.52ms +[2025-09-05 19:59:06] [Rank 0] step:2421/10000 train_time:124437ms step_avg:51.40ms +[2025-09-05 19:59:06] [Rank 0] step:2421/10000 train_time:124437ms step_avg:51.40ms +[2025-09-05 19:59:07] [Rank 0] step:2441/10000 train_time:125176ms step_avg:51.28ms +[2025-09-05 19:59:07] [Rank 0] step:2441/10000 train_time:125176ms step_avg:51.28ms +[2025-09-05 19:59:08] [Rank 0] step:2461/10000 train_time:125915ms step_avg:51.16ms +[2025-09-05 19:59:08] [Rank 0] step:2461/10000 train_time:125915ms step_avg:51.16ms +[2025-09-05 19:59:09] [Rank 0] step:2481/10000 train_time:126654ms step_avg:51.05ms +[2025-09-05 19:59:09] [Rank 0] step:2481/10000 train_time:126654ms step_avg:51.05ms +[2025-09-05 19:59:09] [Rank 0] 
+[2025-09-05 19:59:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 19:59:10] [Rank 0] PRINT: step:2500/10000 train_loss:2.0469 val_loss:1.9744 train_time:127472ms step_avg:50.99ms
+[2025-09-05 19:59:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 19:59:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:00:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:00:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:00:32] [Rank 0] Total Loss: 4.3601
+[2025-09-05 20:00:32] [Rank 0] Total FTA (Unweighted): 0.3144
+[2025-09-05 20:00:32] [Rank 0] Total FTA (Weighted): 0.3144
+[2025-09-05 20:00:32] [Rank 0] Group 0 Loss: 3.2835
+[2025-09-05 20:00:32] [Rank 0] Group 1 Loss: 3.0617
+[2025-09-05 20:00:32] [Rank 0] Group 2 Loss: 3.0496
+[2025-09-05 20:00:32] [Rank 0] Group 3 Loss: 3.4132
+[2025-09-05 20:00:32] [Rank 0] Group 4 Loss: 3.6593
+[2025-09-05 20:00:32] [Rank 0] Group 5 Loss: 4.0390
+[2025-09-05 20:00:32] [Rank 0] Group 6 Loss: 4.3609
+[2025-09-05 20:00:32] [Rank 0] Group 7 Loss: 4.5431
+[2025-09-05 20:00:32] [Rank 0] Group 8 Loss: 4.8691
+[2025-09-05 20:00:32] [Rank 0] Group 9 Loss: 4.9901
+[2025-09-05 20:00:32] [Rank 0] Group 10 Loss: 5.1072
+[2025-09-05 20:00:32] [Rank 0] Group 11 Loss: 5.1113
+[2025-09-05 20:00:32] [Rank 0] Group 12 Loss: 5.0397
+[2025-09-05 20:00:32] [Rank 0] Group 13 Loss: 5.0649
+[2025-09-05 20:00:32] [Rank 0] Group 14 Loss: 5.0964
+[2025-09-05 20:00:32] [Rank 0] Group 15 Loss: 5.0732
+[2025-09-05 20:00:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:00:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:00:32] [Rank 0] Group 2 FTA: 0.7000
+[2025-09-05 20:00:32] [Rank 0] Group 3 FTA: 0.2300
+[2025-09-05 20:00:32] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:00:32] [Rank 0] Group 5 FTA: 0.2700
+[2025-09-05 20:00:32] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:00:32] [Rank 0] Group 7 FTA: 0.1600
+[2025-09-05 20:00:32] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 20:00:32] [Rank 0] Group 9 FTA: 0.1600
+[2025-09-05 20:00:32] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 20:00:32] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 20:00:32] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 20:00:32] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 20:00:32] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 20:00:32] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 20:00:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:00:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:00:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:00:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:00:34] [Rank 0] step:2501/10000 train_time:127482ms step_avg:50.97ms
+[2025-09-05 20:00:34] [Rank 0] step:2521/10000 train_time:128166ms step_avg:50.84ms
+[2025-09-05 20:00:35] [Rank 0] step:2541/10000 train_time:128902ms step_avg:50.73ms
+[2025-09-05 20:00:36] [Rank 0] step:2561/10000 train_time:129641ms step_avg:50.62ms
+[2025-09-05 20:00:37] [Rank 0] step:2581/10000 train_time:130378ms step_avg:50.51ms
+[2025-09-05 20:00:37] [Rank 0] step:2601/10000 train_time:131117ms step_avg:50.41ms
+[2025-09-05 20:00:38] [Rank 0] step:2621/10000 train_time:131856ms step_avg:50.31ms
+[2025-09-05 20:00:39] [Rank 0] step:2641/10000 train_time:132595ms step_avg:50.21ms
+[2025-09-05 20:00:40] [Rank 0] step:2661/10000 train_time:133457ms step_avg:50.15ms
+[2025-09-05 20:00:41] [Rank 0] step:2681/10000 train_time:134196ms step_avg:50.05ms
+[2025-09-05 20:00:41] [Rank 0] step:2701/10000 train_time:134933ms step_avg:49.96ms
+[2025-09-05 20:00:42] [Rank 0] step:2721/10000 train_time:135819ms step_avg:49.92ms
+[2025-09-05 20:00:43] [Rank 0] step:2741/10000 train_time:136558ms step_avg:49.82ms
+[2025-09-05 20:00:44] [Rank 0] step:2761/10000 train_time:137296ms step_avg:49.73ms
+[2025-09-05 20:00:44] [Rank 0] step:2781/10000 train_time:138035ms step_avg:49.63ms
+[2025-09-05 20:00:45] [Rank 0] step:2801/10000 train_time:138774ms step_avg:49.54ms
+[2025-09-05 20:00:46] [Rank 0] step:2821/10000 train_time:140123ms step_avg:49.67ms
+[2025-09-05 20:00:47] [Rank 0] step:2841/10000 train_time:140861ms step_avg:49.58ms
+[2025-09-05 20:00:48] [Rank 0] step:2861/10000 train_time:141600ms step_avg:49.49ms
+[2025-09-05 20:00:49] [Rank 0] step:2881/10000 train_time:142338ms step_avg:49.41ms
+[2025-09-05 20:00:49] [Rank 0] step:2901/10000 train_time:143077ms step_avg:49.32ms
+[2025-09-05 20:00:50] [Rank 0] step:2921/10000 train_time:143816ms step_avg:49.24ms
+[2025-09-05 20:00:51] [Rank 0] step:2941/10000 train_time:144555ms step_avg:49.15ms
+[2025-09-05 20:00:52] [Rank 0] step:2961/10000 train_time:145292ms step_avg:49.07ms
+[2025-09-05 20:00:52] [Rank 0] step:2981/10000 train_time:146031ms step_avg:48.99ms
+[2025-09-05 20:00:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
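The warning above is plain integer arithmetic: 491520 / 65536 = 7.5, so an evaluator that consumes only whole batches runs 7 full batches and drops the remaining 32768 tokens. A minimal sketch of that check, assuming whole-batch consumption; the variable names are illustrative, not the training script's actual identifiers:

# Sketch of the divisibility check behind the recurring warning.
# Assumes the evaluator consumes only whole batches; names are hypothetical.
val_tokens = 491520
val_batch_size = 65536

full_batches, leftover = divmod(val_tokens, val_batch_size)
if leftover:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
# divmod gives (7, 32768): 7 full batches evaluated, 32768 tokens skipped.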
+[2025-09-05 20:00:54] [Rank 0] PRINT: step:3000/10000 train_loss:1.9372 val_loss:1.8903 train_time:146850ms step_avg:48.95ms
+[2025-09-05 20:00:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:00:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:02:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:02:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:02:16] [Rank 0] Total Loss: 4.3055
+[2025-09-05 20:02:16] [Rank 0] Total FTA (Unweighted): 0.3463
+[2025-09-05 20:02:16] [Rank 0] Total FTA (Weighted): 0.3463
+[2025-09-05 20:02:16] [Rank 0] Group 0 Loss: 3.2730
+[2025-09-05 20:02:16] [Rank 0] Group 1 Loss: 3.0828
+[2025-09-05 20:02:16] [Rank 0] Group 2 Loss: 3.1044
+[2025-09-05 20:02:16] [Rank 0] Group 3 Loss: 3.4437
+[2025-09-05 20:02:16] [Rank 0] Group 4 Loss: 3.6529
+[2025-09-05 20:02:16] [Rank 0] Group 5 Loss: 3.9505
+[2025-09-05 20:02:16] [Rank 0] Group 6 Loss: 4.2497
+[2025-09-05 20:02:16] [Rank 0] Group 7 Loss: 4.4499
+[2025-09-05 20:02:16] [Rank 0] Group 8 Loss: 4.7726
+[2025-09-05 20:02:16] [Rank 0] Group 9 Loss: 4.9283
+[2025-09-05 20:02:16] [Rank 0] Group 10 Loss: 5.0078
+[2025-09-05 20:02:16] [Rank 0] Group 11 Loss: 5.0084
+[2025-09-05 20:02:16] [Rank 0] Group 12 Loss: 4.9552
+[2025-09-05 20:02:16] [Rank 0] Group 13 Loss: 4.9877
+[2025-09-05 20:02:16] [Rank 0] Group 14 Loss: 5.0335
+[2025-09-05 20:02:16] [Rank 0] Group 15 Loss: 4.9874
+[2025-09-05 20:02:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:02:16] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:02:16] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:02:16] [Rank 0] Group 3 FTA: 0.3100
+[2025-09-05 20:02:16] [Rank 0] Group 4 FTA: 0.2500
+[2025-09-05 20:02:16] [Rank 0] Group 5 FTA: 0.3600
+[2025-09-05 20:02:16] [Rank 0] Group 6 FTA: 0.2900
+[2025-09-05 20:02:16] [Rank 0] Group 7 FTA: 0.1800
+[2025-09-05 20:02:16] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 20:02:16] [Rank 0] Group 9 FTA: 0.1700
+[2025-09-05 20:02:16] [Rank 0] Group 10 FTA: 0.2100
+[2025-09-05 20:02:16] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 20:02:16] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 20:02:16] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 20:02:16] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 20:02:16] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 20:02:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:02:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:02:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:02:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:02:18] [Rank 0] step:3001/10000 train_time:146860ms step_avg:48.94ms
+[2025-09-05 20:02:18] [Rank 0] step:3021/10000 train_time:147535ms step_avg:48.84ms
+[2025-09-05 20:02:19] [Rank 0] step:3041/10000 train_time:148273ms step_avg:48.76ms
+[2025-09-05 20:02:20] [Rank 0] step:3061/10000 train_time:149012ms step_avg:48.68ms
+[2025-09-05 20:02:20] [Rank 0] step:3081/10000 train_time:149751ms step_avg:48.60ms
+[2025-09-05 20:02:21] [Rank 0] step:3101/10000 train_time:150489ms step_avg:48.53ms
+[2025-09-05 20:02:22] [Rank 0] step:3121/10000 train_time:151228ms step_avg:48.45ms
+[2025-09-05 20:02:23] [Rank 0] step:3141/10000 train_time:151967ms step_avg:48.38ms
+[2025-09-05 20:02:23] [Rank 0] step:3161/10000 train_time:152706ms step_avg:48.31ms
+[2025-09-05 20:02:24] [Rank 0] step:3181/10000 train_time:153445ms step_avg:48.24ms
+[2025-09-05 20:02:25] [Rank 0] step:3201/10000 train_time:154185ms step_avg:48.17ms
+[2025-09-05 20:02:26] [Rank 0] step:3221/10000 train_time:154923ms step_avg:48.10ms
+[2025-09-05 20:02:26] [Rank 0] step:3241/10000 train_time:155663ms step_avg:48.03ms
+[2025-09-05 20:02:27] [Rank 0] step:3261/10000 train_time:156402ms step_avg:47.96ms
+[2025-09-05 20:02:28] [Rank 0] step:3281/10000 train_time:157141ms step_avg:47.89ms
+[2025-09-05 20:02:29] [Rank 0] step:3301/10000 train_time:157880ms step_avg:47.83ms
+[2025-09-05 20:02:29] [Rank 0] step:3321/10000 train_time:158617ms step_avg:47.76ms
+[2025-09-05 20:02:30] [Rank 0] step:3341/10000 train_time:159355ms step_avg:47.70ms
+[2025-09-05 20:02:31] [Rank 0] step:3361/10000 train_time:160094ms step_avg:47.63ms
+[2025-09-05 20:02:32] [Rank 0] step:3381/10000 train_time:160831ms step_avg:47.57ms
+[2025-09-05 20:02:32] [Rank 0] step:3401/10000 train_time:161569ms step_avg:47.51ms
+[2025-09-05 20:02:33] [Rank 0] step:3421/10000 train_time:162307ms step_avg:47.44ms
+[2025-09-05 20:02:34] [Rank 0] step:3441/10000 train_time:163044ms step_avg:47.38ms
+[2025-09-05 20:02:35] [Rank 0] step:3461/10000 train_time:163783ms step_avg:47.32ms
+[2025-09-05 20:02:35] [Rank 0] step:3481/10000 train_time:164520ms step_avg:47.26ms
+[2025-09-05 20:02:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
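In every eval block above, "Total FTA (Unweighted)" and "Total FTA (Weighted)" are identical. That is expected when the fixed-eval set is balanced: with 1600 samples split evenly across 16 groups (per_group_k = 100 in the run config), the plain mean over groups coincides with the sample-count-weighted mean. A minimal sketch reproducing the step-3000 totals from the group values logged above; the group sizes are assumed from the config, and the names are illustrative:

# Why unweighted == weighted FTA here: equal-sized groups.
# Group values copied from the step-3000 eval block above.
group_fta = [1.0000, 1.0000, 1.0000, 0.3100, 0.2500, 0.3600, 0.2900, 0.1800,
             0.2300, 0.1700, 0.2100, 0.1600, 0.1000, 0.1200, 0.1000, 0.0600]
group_sizes = [100] * 16  # assumed from per_group_k=100; 1600 samples total

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
print(f"{unweighted:.4f} {weighted:.4f}")  # both ≈ 0.3463, matching the log

If the groups ever had unequal sample counts, the two totals would diverge, so the pair of lines doubles as a sanity check on the fixed-eval split.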
+[2025-09-05 20:02:36] [Rank 0] PRINT: step:3500/10000 train_loss:1.8654 val_loss:1.8285 train_time:165338ms step_avg:47.24ms
+[2025-09-05 20:02:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:02:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:03:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:03:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:03:58] [Rank 0] Total Loss: 4.3310
+[2025-09-05 20:03:58] [Rank 0] Total FTA (Unweighted): 0.3750
+[2025-09-05 20:03:58] [Rank 0] Total FTA (Weighted): 0.3750
+[2025-09-05 20:03:58] [Rank 0] Group 0 Loss: 3.4376
+[2025-09-05 20:03:58] [Rank 0] Group 1 Loss: 3.0524
+[2025-09-05 20:03:58] [Rank 0] Group 2 Loss: 3.0926
+[2025-09-05 20:03:58] [Rank 0] Group 3 Loss: 3.5137
+[2025-09-05 20:03:58] [Rank 0] Group 4 Loss: 3.6853
+[2025-09-05 20:03:58] [Rank 0] Group 5 Loss: 3.9908
+[2025-09-05 20:03:58] [Rank 0] Group 6 Loss: 4.2690
+[2025-09-05 20:03:58] [Rank 0] Group 7 Loss: 4.4714
+[2025-09-05 20:03:58] [Rank 0] Group 8 Loss: 4.7612
+[2025-09-05 20:03:58] [Rank 0] Group 9 Loss: 4.9145
+[2025-09-05 20:03:58] [Rank 0] Group 10 Loss: 5.0014
+[2025-09-05 20:03:58] [Rank 0] Group 11 Loss: 5.0683
+[2025-09-05 20:03:58] [Rank 0] Group 12 Loss: 4.9622
+[2025-09-05 20:03:58] [Rank 0] Group 13 Loss: 5.0325
+[2025-09-05 20:03:58] [Rank 0] Group 14 Loss: 5.0356
+[2025-09-05 20:03:58] [Rank 0] Group 15 Loss: 5.0069
+[2025-09-05 20:03:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:03:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:03:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:03:58] [Rank 0] Group 3 FTA: 0.4000
+[2025-09-05 20:03:58] [Rank 0] Group 4 FTA: 0.2900
+[2025-09-05 20:03:58] [Rank 0] Group 5 FTA: 0.3900
+[2025-09-05 20:03:58] [Rank 0] Group 6 FTA: 0.3300
+[2025-09-05 20:03:58] [Rank 0] Group 7 FTA: 0.2300
+[2025-09-05 20:03:58] [Rank 0] Group 8 FTA: 0.2600
+[2025-09-05 20:03:58] [Rank 0] Group 9 FTA: 0.2000
+[2025-09-05 20:03:58] [Rank 0] Group 10 FTA: 0.2200
+[2025-09-05 20:03:58] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 20:03:58] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 20:03:58] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 20:03:58] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 20:03:58] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 20:03:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:03:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:03:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:04:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:04:00] [Rank 0] step:3501/10000 train_time:165348ms step_avg:47.23ms
+[2025-09-05 20:04:01] [Rank 0] step:3521/10000 train_time:166031ms step_avg:47.15ms
+[2025-09-05 20:04:01] [Rank 0] step:3541/10000 train_time:166768ms step_avg:47.10ms
+[2025-09-05 20:04:02] [Rank 0] step:3561/10000 train_time:167507ms step_avg:47.04ms
+[2025-09-05 20:04:03] [Rank 0] step:3581/10000 train_time:168245ms step_avg:46.98ms
+[2025-09-05 20:04:03] [Rank 0] step:3601/10000 train_time:168983ms step_avg:46.93ms
+[2025-09-05 20:04:04] [Rank 0] step:3621/10000 train_time:169721ms step_avg:46.87ms
+[2025-09-05 20:04:06] [Rank 0] step:3641/10000 train_time:171087ms step_avg:46.99ms
+[2025-09-05 20:04:06] [Rank 0] step:3661/10000 train_time:171824ms step_avg:46.93ms
+[2025-09-05 20:04:07] [Rank 0] step:3681/10000 train_time:172563ms step_avg:46.88ms
+[2025-09-05 20:04:08] [Rank 0] step:3701/10000 train_time:173300ms step_avg:46.83ms
+[2025-09-05 20:04:09] [Rank 0] step:3721/10000 train_time:174037ms step_avg:46.77ms
+[2025-09-05 20:04:09] [Rank 0] step:3741/10000 train_time:174775ms step_avg:46.72ms
+[2025-09-05 20:04:10] [Rank 0] step:3761/10000 train_time:175512ms step_avg:46.67ms
+[2025-09-05 20:04:11] [Rank 0] step:3781/10000 train_time:176250ms step_avg:46.61ms
+[2025-09-05 20:04:11] [Rank 0] step:3801/10000 train_time:176987ms step_avg:46.56ms
+[2025-09-05 20:04:12] [Rank 0] step:3821/10000 train_time:177725ms step_avg:46.51ms
+[2025-09-05 20:04:13] [Rank 0] step:3841/10000 train_time:178463ms step_avg:46.46ms
+[2025-09-05 20:04:14] [Rank 0] step:3861/10000 train_time:179200ms step_avg:46.41ms
+[2025-09-05 20:04:14] [Rank 0] step:3881/10000 train_time:179937ms step_avg:46.36ms
+[2025-09-05 20:04:15] [Rank 0] step:3901/10000 train_time:180674ms step_avg:46.31ms
+[2025-09-05 20:04:16] [Rank 0] step:3921/10000 train_time:181411ms step_avg:46.27ms
+[2025-09-05 20:04:17] [Rank 0] step:3941/10000 train_time:182149ms step_avg:46.22ms
+[2025-09-05 20:04:17] [Rank 0] step:3961/10000 train_time:182886ms step_avg:46.17ms
+[2025-09-05 20:04:18] [Rank 0] step:3981/10000 train_time:183622ms step_avg:46.12ms
+[2025-09-05 20:04:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
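The step_avg field is simply cumulative train_time divided by the step count, and it can be checked against any line above. Note that train_time appears to exclude evaluation wall time: it advances only about 10 ms across each detailed-eval pause (e.g. 165338ms at step 3500 to 165348ms at step 3501), which is why step_avg keeps drifting down after every eval. A one-line sketch of the arithmetic, with illustrative names:

# step_avg is cumulative training time over completed steps (names hypothetical).
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

print(f"{step_avg_ms(183622, 3981):.2f}ms")  # 46.12ms, matching step:3981 above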
+[2025-09-05 20:04:19] [Rank 0] PRINT: step:4000/10000 train_loss:1.8158 val_loss:1.7858 train_time:184440ms step_avg:46.11ms
+[2025-09-05 20:04:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:04:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:05:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:05:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:05:42] [Rank 0] Total Loss: 4.3118
+[2025-09-05 20:05:42] [Rank 0] Total FTA (Unweighted): 0.3944
+[2025-09-05 20:05:42] [Rank 0] Total FTA (Weighted): 0.3944
+[2025-09-05 20:05:42] [Rank 0] Group 0 Loss: 3.4484
+[2025-09-05 20:05:42] [Rank 0] Group 1 Loss: 3.1288
+[2025-09-05 20:05:42] [Rank 0] Group 2 Loss: 3.0925
+[2025-09-05 20:05:42] [Rank 0] Group 3 Loss: 3.4935
+[2025-09-05 20:05:42] [Rank 0] Group 4 Loss: 3.6842
+[2025-09-05 20:05:42] [Rank 0] Group 5 Loss: 4.0092
+[2025-09-05 20:05:42] [Rank 0] Group 6 Loss: 4.2209
+[2025-09-05 20:05:42] [Rank 0] Group 7 Loss: 4.4228
+[2025-09-05 20:05:42] [Rank 0] Group 8 Loss: 4.7384
+[2025-09-05 20:05:42] [Rank 0] Group 9 Loss: 4.8676
+[2025-09-05 20:05:42] [Rank 0] Group 10 Loss: 4.9398
+[2025-09-05 20:05:42] [Rank 0] Group 11 Loss: 5.0081
+[2025-09-05 20:05:42] [Rank 0] Group 12 Loss: 4.9534
+[2025-09-05 20:05:42] [Rank 0] Group 13 Loss: 5.0027
+[2025-09-05 20:05:42] [Rank 0] Group 14 Loss: 4.9965
+[2025-09-05 20:05:42] [Rank 0] Group 15 Loss: 4.9824
+[2025-09-05 20:05:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:05:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:05:42] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:05:42] [Rank 0] Group 3 FTA: 0.4300
+[2025-09-05 20:05:42] [Rank 0] Group 4 FTA: 0.3600
+[2025-09-05 20:05:42] [Rank 0] Group 5 FTA: 0.4200
+[2025-09-05 20:05:42] [Rank 0] Group 6 FTA: 0.3500
+[2025-09-05 20:05:42] [Rank 0] Group 7 FTA: 0.2500
+[2025-09-05 20:05:42] [Rank 0] Group 8 FTA: 0.2900
+[2025-09-05 20:05:42] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 20:05:42] [Rank 0] Group 10 FTA: 0.2600
+[2025-09-05 20:05:42] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 20:05:42] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 20:05:42] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 20:05:42] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 20:05:42] [Rank 0] Group 15 FTA: 0.1400
+[2025-09-05 20:05:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:05:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:05:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:05:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:05:43] [Rank 0] step:4001/10000 train_time:184449ms step_avg:46.10ms
+[2025-09-05 20:05:45] [Rank 0] step:4021/10000 train_time:185745ms step_avg:46.19ms
+[2025-09-05 20:05:45] [Rank 0] step:4041/10000 train_time:186482ms step_avg:46.15ms
+[2025-09-05 20:05:46] [Rank 0] step:4061/10000 train_time:187218ms step_avg:46.10ms
+[2025-09-05 20:05:47] [Rank 0] step:4081/10000 train_time:187955ms step_avg:46.06ms
+[2025-09-05 20:05:47] [Rank 0] step:4101/10000 train_time:188691ms step_avg:46.01ms
+[2025-09-05 20:05:48] [Rank 0] step:4121/10000 train_time:189428ms step_avg:45.97ms
+[2025-09-05 20:05:49] [Rank 0] step:4141/10000 train_time:190165ms step_avg:45.92ms
+[2025-09-05 20:05:50] [Rank 0] step:4161/10000 train_time:190903ms step_avg:45.88ms
+[2025-09-05 20:05:50] [Rank 0] step:4181/10000 train_time:191640ms step_avg:45.84ms
+[2025-09-05 20:05:51] [Rank 0] step:4201/10000 train_time:192378ms step_avg:45.79ms
+[2025-09-05 20:05:52] [Rank 0] step:4221/10000 train_time:193115ms step_avg:45.75ms
+[2025-09-05 20:05:53] [Rank 0] step:4241/10000 train_time:193852ms step_avg:45.71ms
+[2025-09-05 20:05:53] [Rank 0] step:4261/10000 train_time:194589ms step_avg:45.67ms
+[2025-09-05 20:05:54] [Rank 0] step:4281/10000 train_time:195326ms step_avg:45.63ms
+[2025-09-05 20:05:55] [Rank 0] step:4301/10000 train_time:196065ms step_avg:45.59ms
+[2025-09-05 20:05:56] [Rank 0] step:4321/10000 train_time:196894ms step_avg:45.57ms
+[2025-09-05 20:05:56] [Rank 0] step:4341/10000 train_time:197632ms step_avg:45.53ms
+[2025-09-05 20:05:57] [Rank 0] step:4361/10000 train_time:198370ms step_avg:45.49ms
+[2025-09-05 20:05:58] [Rank 0] step:4381/10000 train_time:199252ms step_avg:45.48ms
+[2025-09-05 20:05:59] [Rank 0] step:4401/10000 train_time:199991ms step_avg:45.44ms
+[2025-09-05 20:06:00] [Rank 0] step:4421/10000 train_time:200729ms step_avg:45.40ms
+[2025-09-05 20:06:00] [Rank 0] step:4441/10000 train_time:201468ms step_avg:45.37ms
+[2025-09-05 20:06:01] [Rank 0] step:4461/10000 train_time:202205ms step_avg:45.33ms
+[2025-09-05 20:06:02] [Rank 0] step:4481/10000 train_time:202942ms step_avg:45.29ms
+[2025-09-05 20:06:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:06:03] [Rank 0] PRINT: step:4500/10000 train_loss:1.7787 val_loss:1.7538 train_time:203761ms step_avg:45.28ms
+[2025-09-05 20:06:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:06:03] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:07:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:07:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:07:25] [Rank 0] Total Loss: 4.3521
+[2025-09-05 20:07:25] [Rank 0] Total FTA (Unweighted): 0.4119
+[2025-09-05 20:07:25] [Rank 0] Total FTA (Weighted): 0.4119
+[2025-09-05 20:07:25] [Rank 0] Group 0 Loss: 3.5178
+[2025-09-05 20:07:25] [Rank 0] Group 1 Loss: 3.1770
+[2025-09-05 20:07:25] [Rank 0] Group 2 Loss: 3.1942
+[2025-09-05 20:07:25] [Rank 0] Group 3 Loss: 3.5512
+[2025-09-05 20:07:25] [Rank 0] Group 4 Loss: 3.7412
+[2025-09-05 20:07:25] [Rank 0] Group 5 Loss: 4.0588
+[2025-09-05 20:07:25] [Rank 0] Group 6 Loss: 4.2592
+[2025-09-05 20:07:25] [Rank 0] Group 7 Loss: 4.4531
+[2025-09-05 20:07:25] [Rank 0] Group 8 Loss: 4.7559
+[2025-09-05 20:07:25] [Rank 0] Group 9 Loss: 4.8865
+[2025-09-05 20:07:25] [Rank 0] Group 10 Loss: 5.0194
+[2025-09-05 20:07:25] [Rank 0] Group 11 Loss: 5.0637
+[2025-09-05 20:07:25] [Rank 0] Group 12 Loss: 4.9569
+[2025-09-05 20:07:25] [Rank 0] Group 13 Loss: 4.9935
+[2025-09-05 20:07:25] [Rank 0] Group 14 Loss: 5.0127
+[2025-09-05 20:07:25] [Rank 0] Group 15 Loss: 4.9924
+[2025-09-05 20:07:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:07:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:07:25] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:07:25] [Rank 0] Group 3 FTA: 0.5300
+[2025-09-05 20:07:25] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 20:07:25] [Rank 0] Group 5 FTA: 0.4200
+[2025-09-05 20:07:25] [Rank 0] Group 6 FTA: 0.3600
+[2025-09-05 20:07:25] [Rank 0] Group 7 FTA: 0.2800
+[2025-09-05 20:07:25] [Rank 0] Group 8 FTA: 0.3000
+[2025-09-05 20:07:25] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 20:07:25] [Rank 0] Group 10 FTA: 0.2900
+[2025-09-05 20:07:25] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 20:07:25] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 20:07:25] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 20:07:25] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 20:07:25] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:07:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:07:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:07:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:07:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:07:26] [Rank 0] step:4501/10000 train_time:203772ms step_avg:45.27ms
+[2025-09-05 20:07:27] [Rank 0] step:4521/10000 train_time:204449ms step_avg:45.22ms
+[2025-09-05 20:07:28] [Rank 0] step:4541/10000 train_time:205187ms step_avg:45.19ms
+[2025-09-05 20:07:28] [Rank 0] step:4561/10000 train_time:205926ms step_avg:45.15ms
+[2025-09-05 20:07:29] [Rank 0] step:4581/10000 train_time:206665ms step_avg:45.11ms
+[2025-09-05 20:07:30] [Rank 0] step:4601/10000 train_time:207404ms step_avg:45.08ms
+[2025-09-05 20:07:31] [Rank 0] step:4621/10000 train_time:208142ms step_avg:45.04ms
+[2025-09-05 20:07:31] [Rank 0] step:4641/10000 train_time:208879ms step_avg:45.01ms
+[2025-09-05 20:07:32] [Rank 0] step:4661/10000 train_time:209618ms step_avg:44.97ms
+[2025-09-05 20:07:33] [Rank 0] step:4681/10000 train_time:210356ms step_avg:44.94ms
+[2025-09-05 20:07:34] [Rank 0] step:4701/10000 train_time:211093ms step_avg:44.90ms
+[2025-09-05 20:07:34] [Rank 0] step:4721/10000 train_time:211832ms step_avg:44.87ms
+[2025-09-05 20:07:35] [Rank 0] step:4741/10000 train_time:212572ms step_avg:44.84ms
+[2025-09-05 20:07:36] [Rank 0] step:4761/10000 train_time:213310ms step_avg:44.80ms
+[2025-09-05 20:07:37] [Rank 0] step:4781/10000 train_time:214050ms step_avg:44.77ms
+[2025-09-05 20:07:37] [Rank 0] step:4801/10000 train_time:214787ms step_avg:44.74ms
+[2025-09-05 20:07:38] [Rank 0] step:4821/10000 train_time:215525ms step_avg:44.71ms
+[2025-09-05 20:07:39] [Rank 0] step:4841/10000 train_time:216568ms step_avg:44.74ms
+[2025-09-05 20:07:40] [Rank 0] step:4861/10000 train_time:217307ms step_avg:44.70ms
+[2025-09-05 20:07:41] [Rank 0] step:4881/10000 train_time:218045ms step_avg:44.67ms
+[2025-09-05 20:07:41] [Rank 0] step:4901/10000 train_time:218784ms step_avg:44.64ms
+[2025-09-05 20:07:42] [Rank 0] step:4921/10000 train_time:219522ms step_avg:44.61ms
+[2025-09-05 20:07:43] [Rank 0] step:4941/10000 train_time:220261ms step_avg:44.58ms
+[2025-09-05 20:07:44] [Rank 0] step:4961/10000 train_time:220999ms step_avg:44.55ms
+[2025-09-05 20:07:44] [Rank 0] step:4981/10000 train_time:221738ms step_avg:44.52ms
+[2025-09-05 20:07:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:07:45] [Rank 0] PRINT: step:5000/10000 train_loss:1.7502 val_loss:1.7289 train_time:222557ms step_avg:44.51ms
+[2025-09-05 20:07:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:07:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:09:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:09:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:09:08] [Rank 0] Total Loss: 4.2309
+[2025-09-05 20:09:08] [Rank 0] Total FTA (Unweighted): 0.4156
+[2025-09-05 20:09:08] [Rank 0] Total FTA (Weighted): 0.4156
+[2025-09-05 20:09:08] [Rank 0] Group 0 Loss: 3.4192
+[2025-09-05 20:09:08] [Rank 0] Group 1 Loss: 3.0628
+[2025-09-05 20:09:08] [Rank 0] Group 2 Loss: 3.0793
+[2025-09-05 20:09:08] [Rank 0] Group 3 Loss: 3.4678
+[2025-09-05 20:09:08] [Rank 0] Group 4 Loss: 3.6200
+[2025-09-05 20:09:08] [Rank 0] Group 5 Loss: 3.9143
+[2025-09-05 20:09:08] [Rank 0] Group 6 Loss: 4.1288
+[2025-09-05 20:09:08] [Rank 0] Group 7 Loss: 4.3304
+[2025-09-05 20:09:08] [Rank 0] Group 8 Loss: 4.6291
+[2025-09-05 20:09:08] [Rank 0] Group 9 Loss: 4.7786
+[2025-09-05 20:09:08] [Rank 0] Group 10 Loss: 4.8614
+[2025-09-05 20:09:08] [Rank 0] Group 11 Loss: 4.9166
+[2025-09-05 20:09:08] [Rank 0] Group 12 Loss: 4.8574
+[2025-09-05 20:09:08] [Rank 0] Group 13 Loss: 4.8781
+[2025-09-05 20:09:08] [Rank 0] Group 14 Loss: 4.8871
+[2025-09-05 20:09:08] [Rank 0] Group 15 Loss: 4.8632
+[2025-09-05 20:09:08] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:09:08] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:09:08] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:09:08] [Rank 0] Group 3 FTA: 0.5300
+[2025-09-05 20:09:08] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 20:09:08] [Rank 0] Group 5 FTA: 0.4200
+[2025-09-05 20:09:08] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 20:09:08] [Rank 0] Group 7 FTA: 0.3000
+[2025-09-05 20:09:08] [Rank 0] Group 8 FTA: 0.3000
+[2025-09-05 20:09:08] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 20:09:08] [Rank 0] Group 10 FTA: 0.2800
+[2025-09-05 20:09:08] [Rank 0] Group 11 FTA: 0.2500
+[2025-09-05 20:09:08] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 20:09:08] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 20:09:08] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 20:09:08] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 20:09:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:09:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:09:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:09:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:09:10] [Rank 0] step:5001/10000 train_time:222567ms step_avg:44.50ms
+[2025-09-05 20:09:10] [Rank 0] step:5021/10000 train_time:223242ms step_avg:44.46ms
+[2025-09-05 20:09:11] [Rank 0] step:5041/10000 train_time:223980ms step_avg:44.43ms
+[2025-09-05 20:09:12] [Rank 0] step:5061/10000 train_time:224718ms step_avg:44.40ms
+[2025-09-05 20:09:13] [Rank 0] step:5081/10000 train_time:225456ms step_avg:44.37ms
+[2025-09-05 20:09:13] [Rank 0] step:5101/10000 train_time:226195ms step_avg:44.34ms
+[2025-09-05 20:09:14] [Rank 0] step:5121/10000 train_time:226933ms step_avg:44.31ms
+[2025-09-05 20:09:15] [Rank 0] step:5141/10000 train_time:227671ms step_avg:44.29ms
+[2025-09-05 20:09:16] [Rank 0] step:5161/10000 train_time:228410ms step_avg:44.26ms
+[2025-09-05 20:09:16] [Rank 0] step:5181/10000 train_time:229148ms step_avg:44.23ms
+[2025-09-05 20:09:17] [Rank 0] step:5201/10000 train_time:229886ms step_avg:44.20ms
+[2025-09-05 20:09:18] [Rank 0] step:5221/10000 train_time:230624ms step_avg:44.17ms
+[2025-09-05 20:09:19] [Rank 0] step:5241/10000 train_time:231362ms step_avg:44.14ms
+[2025-09-05 20:09:19] [Rank 0] step:5261/10000 train_time:232101ms step_avg:44.12ms
+[2025-09-05 20:09:20] [Rank 0] step:5281/10000 train_time:232839ms step_avg:44.09ms
+[2025-09-05 20:09:21] [Rank 0] step:5301/10000 train_time:233577ms step_avg:44.06ms
+[2025-09-05 20:09:22] [Rank 0] step:5321/10000 train_time:234316ms step_avg:44.04ms
+[2025-09-05 20:09:22] [Rank 0] step:5341/10000 train_time:235054ms step_avg:44.01ms
+[2025-09-05 20:09:23] [Rank 0] step:5361/10000 train_time:235792ms step_avg:43.98ms
+[2025-09-05 20:09:24] [Rank 0] step:5381/10000 train_time:236532ms step_avg:43.96ms
+[2025-09-05 20:09:24] [Rank 0] step:5401/10000 train_time:237269ms step_avg:43.93ms
+[2025-09-05 20:09:25] [Rank 0] step:5421/10000 train_time:238009ms step_avg:43.90ms
+[2025-09-05 20:09:26] [Rank 0] step:5441/10000 train_time:238748ms step_avg:43.88ms
+[2025-09-05 20:09:27] [Rank 0] step:5461/10000 train_time:239486ms step_avg:43.85ms
+[2025-09-05 20:09:27] [Rank 0] step:5481/10000 train_time:240225ms step_avg:43.83ms
+[2025-09-05 20:09:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:09:29] [Rank 0] PRINT: step:5500/10000 train_loss:1.7276 val_loss:1.7126 train_time:241046ms step_avg:43.83ms
+[2025-09-05 20:09:29] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:09:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:10:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:10:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:10:51] [Rank 0] Total Loss: 4.2700
+[2025-09-05 20:10:51] [Rank 0] Total FTA (Unweighted): 0.4294
+[2025-09-05 20:10:51] [Rank 0] Total FTA (Weighted): 0.4294
+[2025-09-05 20:10:51] [Rank 0] Group 0 Loss: 3.4913
+[2025-09-05 20:10:51] [Rank 0] Group 1 Loss: 3.0944
+[2025-09-05 20:10:51] [Rank 0] Group 2 Loss: 3.0837
+[2025-09-05 20:10:51] [Rank 0] Group 3 Loss: 3.5290
+[2025-09-05 20:10:51] [Rank 0] Group 4 Loss: 3.6817
+[2025-09-05 20:10:51] [Rank 0] Group 5 Loss: 3.9556
+[2025-09-05 20:10:51] [Rank 0] Group 6 Loss: 4.1697
+[2025-09-05 20:10:51] [Rank 0] Group 7 Loss: 4.3589
+[2025-09-05 20:10:51] [Rank 0] Group 8 Loss: 4.6613
+[2025-09-05 20:10:51] [Rank 0] Group 9 Loss: 4.8004
+[2025-09-05 20:10:51] [Rank 0] Group 10 Loss: 4.9225
+[2025-09-05 20:10:51] [Rank 0] Group 11 Loss: 4.9624
+[2025-09-05 20:10:51] [Rank 0] Group 12 Loss: 4.8843
+[2025-09-05 20:10:51] [Rank 0] Group 13 Loss: 4.9129
+[2025-09-05 20:10:51] [Rank 0] Group 14 Loss: 4.9211
+[2025-09-05 20:10:51] [Rank 0] Group 15 Loss: 4.8912
+[2025-09-05 20:10:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:10:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:10:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:10:51] [Rank 0] Group 3 FTA: 0.5700
+[2025-09-05 20:10:51] [Rank 0] Group 4 FTA: 0.4500
+[2025-09-05 20:10:51] [Rank 0] Group 5 FTA: 0.4600
+[2025-09-05 20:10:51] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 20:10:51] [Rank 0] Group 7 FTA: 0.3100
+[2025-09-05 20:10:51] [Rank 0] Group 8 FTA: 0.3100
+[2025-09-05 20:10:51] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 20:10:51] [Rank 0] Group 10 FTA: 0.3000
+[2025-09-05 20:10:51] [Rank 0] Group 11 FTA: 0.2800
+[2025-09-05 20:10:51] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 20:10:51] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 20:10:51] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 20:10:51] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:10:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:10:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:10:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:10:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:10:53] [Rank 0] step:5501/10000 train_time:241056ms step_avg:43.82ms
+[2025-09-05 20:10:53] [Rank 0] step:5521/10000 train_time:241719ms step_avg:43.78ms
+[2025-09-05 20:10:54] [Rank 0] step:5541/10000 train_time:242458ms step_avg:43.76ms
+[2025-09-05 20:10:55] [Rank 0] step:5561/10000 train_time:243195ms step_avg:43.73ms
+[2025-09-05 20:10:56] [Rank 0] step:5581/10000 train_time:243934ms step_avg:43.71ms
+[2025-09-05 20:10:56] [Rank 0] step:5601/10000 train_time:244673ms step_avg:43.68ms
+[2025-09-05 20:10:57] [Rank 0] step:5621/10000 train_time:245411ms step_avg:43.66ms
+[2025-09-05 20:10:58] [Rank 0] step:5641/10000 train_time:246343ms step_avg:43.67ms
+[2025-09-05 20:10:59] [Rank 0] step:5661/10000 train_time:247081ms step_avg:43.65ms
+[2025-09-05 20:10:59] [Rank 0] step:5681/10000 train_time:247821ms step_avg:43.62ms
+[2025-09-05 20:11:00] [Rank 0] step:5701/10000 train_time:248560ms step_avg:43.60ms
+[2025-09-05 20:11:01] [Rank 0] step:5721/10000 train_time:249299ms step_avg:43.58ms
+[2025-09-05 20:11:02] [Rank 0] step:5741/10000 train_time:250038ms step_avg:43.55ms
+[2025-09-05 20:11:02] [Rank 0] step:5761/10000 train_time:250776ms step_avg:43.53ms
+[2025-09-05 20:11:03] [Rank 0] step:5781/10000 train_time:251515ms step_avg:43.51ms
+[2025-09-05 20:11:04] [Rank 0] step:5801/10000 train_time:252255ms step_avg:43.48ms
+[2025-09-05 20:11:05] [Rank 0] step:5821/10000 train_time:252993ms step_avg:43.46ms
+[2025-09-05 20:11:05] [Rank 0] step:5841/10000 train_time:253732ms step_avg:43.44ms
+[2025-09-05 20:11:06] [Rank 0] step:5861/10000 train_time:254471ms step_avg:43.42ms
+[2025-09-05 20:11:07] [Rank 0] step:5881/10000 train_time:255210ms step_avg:43.40ms
+[2025-09-05 20:11:08] [Rank 0] step:5901/10000 train_time:255949ms step_avg:43.37ms
+[2025-09-05 20:11:08] [Rank 0] step:5921/10000 train_time:256687ms step_avg:43.35ms
+[2025-09-05 20:11:09] [Rank 0] step:5941/10000 train_time:257425ms step_avg:43.33ms
+[2025-09-05 20:11:10] [Rank 0] step:5961/10000 train_time:258163ms step_avg:43.31ms
+[2025-09-05 20:11:10] [Rank 0] step:5981/10000 train_time:258902ms step_avg:43.29ms
+[2025-09-05 20:11:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:11:12] [Rank 0] PRINT: step:6000/10000 train_loss:1.7116 val_loss:1.6952 train_time:259868ms step_avg:43.31ms
+[2025-09-05 20:11:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:11:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:12:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:12:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:12:33] [Rank 0] Total Loss: 4.3339
+[2025-09-05 20:12:33] [Rank 0] Total FTA (Unweighted): 0.4438
+[2025-09-05 20:12:33] [Rank 0] Total FTA (Weighted): 0.4437
+[2025-09-05 20:12:33] [Rank 0] Group 0 Loss: 3.4733
+[2025-09-05 20:12:33] [Rank 0] Group 1 Loss: 3.2363
+[2025-09-05 20:12:33] [Rank 0] Group 2 Loss: 3.2526
+[2025-09-05 20:12:33] [Rank 0] Group 3 Loss: 3.5819
+[2025-09-05 20:12:33] [Rank 0] Group 4 Loss: 3.7457
+[2025-09-05 20:12:33] [Rank 0] Group 5 Loss: 4.0306
+[2025-09-05 20:12:33] [Rank 0] Group 6 Loss: 4.2242
+[2025-09-05 20:12:33] [Rank 0] Group 7 Loss: 4.4260
+[2025-09-05 20:12:33] [Rank 0] Group 8 Loss: 4.6885
+[2025-09-05 20:12:33] [Rank 0] Group 9 Loss: 4.8443
+[2025-09-05 20:12:33] [Rank 0] Group 10 Loss: 4.9746
+[2025-09-05 20:12:33] [Rank 0] Group 11 Loss: 4.9957
+[2025-09-05 20:12:33] [Rank 0] Group 12 Loss: 4.9588
+[2025-09-05 20:12:33] [Rank 0] Group 13 Loss: 4.9708
+[2025-09-05 20:12:33] [Rank 0] Group 14 Loss: 4.9749
+[2025-09-05 20:12:33] [Rank 0] Group 15 Loss: 4.9642
+[2025-09-05 20:12:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:12:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:12:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:12:33] [Rank 0] Group 3 FTA: 0.5700
+[2025-09-05 20:12:33] [Rank 0] Group 4 FTA: 0.4500
+[2025-09-05 20:12:33] [Rank 0] Group 5 FTA: 0.4800
+[2025-09-05 20:12:34] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 20:12:34] [Rank 0] Group 7 FTA: 0.3200
+[2025-09-05 20:12:34] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 20:12:34] [Rank 0] Group 9 FTA: 0.2500
+[2025-09-05 20:12:34] [Rank 0] Group 10 FTA: 0.3400
+[2025-09-05 20:12:34] [Rank 0] Group 11 FTA: 0.2800
+[2025-09-05 20:12:34] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 20:12:34] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 20:12:34] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 20:12:34] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 20:12:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:12:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:12:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:12:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:12:35] [Rank 0] step:6001/10000 train_time:259879ms step_avg:43.31ms
+[2025-09-05 20:12:36] [Rank 0] step:6021/10000 train_time:261163ms step_avg:43.38ms
+[2025-09-05 20:12:37] [Rank 0] step:6041/10000 train_time:261901ms step_avg:43.35ms
+[2025-09-05 20:12:38] [Rank 0] step:6061/10000 train_time:262640ms step_avg:43.33ms
+[2025-09-05 20:12:39] [Rank 0] step:6081/10000 train_time:263379ms step_avg:43.31ms
+[2025-09-05 20:12:39] [Rank 0] step:6101/10000 train_time:264117ms step_avg:43.29ms
+[2025-09-05 20:12:40] [Rank 0] step:6121/10000 train_time:264856ms step_avg:43.27ms
+[2025-09-05 20:12:41] [Rank 0] step:6141/10000 train_time:265595ms step_avg:43.25ms
+[2025-09-05 20:12:41] [Rank 0] step:6161/10000 train_time:266334ms step_avg:43.23ms
+[2025-09-05 20:12:42] [Rank 0] step:6181/10000 train_time:267072ms step_avg:43.21ms
+[2025-09-05 20:12:43] [Rank 0] step:6201/10000 train_time:267810ms step_avg:43.19ms
+[2025-09-05 20:12:44] [Rank 0] step:6221/10000 train_time:268548ms step_avg:43.17ms
+[2025-09-05 20:12:44] [Rank 0] step:6241/10000 train_time:269287ms step_avg:43.15ms
+[2025-09-05 20:12:45] [Rank 0] step:6261/10000 train_time:270025ms step_avg:43.13ms
+[2025-09-05 20:12:46] [Rank 0] step:6281/10000 train_time:270765ms step_avg:43.11ms
+[2025-09-05 20:12:47] [Rank 0] step:6301/10000 train_time:271504ms step_avg:43.09ms
+[2025-09-05 20:12:47] [Rank 0] step:6321/10000 train_time:272243ms step_avg:43.07ms
+[2025-09-05 20:12:48] [Rank 0] step:6341/10000 train_time:272982ms step_avg:43.05ms
+[2025-09-05 20:12:49] [Rank 0] step:6361/10000 train_time:273721ms step_avg:43.03ms
+[2025-09-05 20:12:50] [Rank 0] step:6381/10000 train_time:274460ms step_avg:43.01ms
+[2025-09-05 20:12:50] [Rank 0] step:6401/10000 train_time:275198ms step_avg:42.99ms
+[2025-09-05 20:12:51] [Rank 0] step:6421/10000 train_time:275937ms step_avg:42.97ms
+[2025-09-05 20:12:52] [Rank 0] step:6441/10000 train_time:276674ms step_avg:42.96ms
+[2025-09-05 20:12:53] [Rank 0] step:6461/10000 train_time:277413ms step_avg:42.94ms
+[2025-09-05 20:12:53] [Rank 0] step:6481/10000 train_time:278150ms step_avg:42.92ms
+[2025-09-05 20:12:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:12:54] [Rank 0] PRINT: step:6500/10000 train_loss:1.6992 val_loss:1.6867 train_time:278969ms step_avg:42.92ms
+[2025-09-05 20:12:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:12:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:14:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:14:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:14:17] [Rank 0] Total Loss: 4.3688
+[2025-09-05 20:14:17] [Rank 0] Total FTA (Unweighted): 0.4431
+[2025-09-05 20:14:17] [Rank 0] Total FTA (Weighted): 0.4431
+[2025-09-05 20:14:17] [Rank 0] Group 0 Loss: 3.5153
+[2025-09-05 20:14:17] [Rank 0] Group 1 Loss: 3.2320
+[2025-09-05 20:14:17] [Rank 0] Group 2 Loss: 3.2062
+[2025-09-05 20:14:17] [Rank 0] Group 3 Loss: 3.6054
+[2025-09-05 20:14:17] [Rank 0] Group 4 Loss: 3.7989
+[2025-09-05 20:14:17] [Rank 0] Group 5 Loss: 4.0701
+[2025-09-05 20:14:17] [Rank 0] Group 6 Loss: 4.3015
+[2025-09-05 20:14:17] [Rank 0] Group 7 Loss: 4.4655
+[2025-09-05 20:14:17] [Rank 0] Group 8 Loss: 4.7285
+[2025-09-05 20:14:17] [Rank 0] Group 9 Loss: 4.8920
+[2025-09-05 20:14:17] [Rank 0] Group 10 Loss: 5.0330
+[2025-09-05 20:14:17] [Rank 0] Group 11 Loss: 5.0648
+[2025-09-05 20:14:17] [Rank 0] Group 12 Loss: 4.9643
+[2025-09-05 20:14:17] [Rank 0] Group 13 Loss: 5.0208
+[2025-09-05 20:14:17] [Rank 0] Group 14 Loss: 5.0204
+[2025-09-05 20:14:17] [Rank 0] Group 15 Loss: 4.9816
+[2025-09-05 20:14:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:14:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:14:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:14:17] [Rank 0] Group 3 FTA: 0.5700
+[2025-09-05 20:14:17] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 20:14:17] [Rank 0] Group 5 FTA: 0.4900
+[2025-09-05 20:14:17] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 20:14:17] [Rank 0] Group 7 FTA: 0.3200
+[2025-09-05 20:14:17] [Rank 0] Group 8 FTA: 0.3700
+[2025-09-05 20:14:17] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 20:14:17] [Rank 0] Group 10 FTA: 0.3300
+[2025-09-05 20:14:17] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 20:14:17] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 20:14:17] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 20:14:17] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 20:14:17] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 20:14:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:14:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:14:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:14:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:14:19] [Rank 0] step:6501/10000 train_time:278979ms step_avg:42.91ms
+[2025-09-05 20:14:19] [Rank 0] step:6521/10000 train_time:279654ms step_avg:42.89ms
+[2025-09-05 20:14:20] [Rank 0] step:6541/10000 train_time:280393ms step_avg:42.87ms
+[2025-09-05 20:14:21] [Rank 0] step:6561/10000 train_time:281133ms step_avg:42.85ms
+[2025-09-05 20:14:22] [Rank 0] step:6581/10000 train_time:282005ms step_avg:42.85ms
+[2025-09-05 20:14:23] [Rank 0] step:6601/10000 train_time:282744ms step_avg:42.83ms
+[2025-09-05 20:14:23] [Rank 0] step:6621/10000 train_time:283483ms step_avg:42.82ms
+[2025-09-05 20:14:24] [Rank 0] step:6641/10000 train_time:284334ms step_avg:42.81ms
+[2025-09-05 20:14:25] [Rank 0] step:6661/10000 train_time:285073ms step_avg:42.80ms
+[2025-09-05 20:14:26] [Rank 0] step:6681/10000 train_time:285813ms step_avg:42.78ms
+[2025-09-05 20:14:26] [Rank 0] step:6701/10000 train_time:286552ms step_avg:42.76ms
+[2025-09-05 20:14:27] [Rank 0] step:6721/10000 train_time:287291ms step_avg:42.75ms
+[2025-09-05 20:14:28] [Rank 0] step:6741/10000 train_time:288030ms step_avg:42.73ms
+[2025-09-05 20:14:29] [Rank 0] step:6761/10000 train_time:288768ms step_avg:42.71ms
+[2025-09-05 20:14:29] [Rank 0] step:6781/10000 train_time:289508ms step_avg:42.69ms
+[2025-09-05 20:14:30] [Rank 0] step:6801/10000 train_time:290246ms step_avg:42.68ms
+[2025-09-05 20:14:31] [Rank 0] step:6821/10000 train_time:290985ms step_avg:42.66ms
+[2025-09-05 20:14:32] [Rank 0] step:6841/10000 train_time:292337ms step_avg:42.73ms
+[2025-09-05 20:14:33] [Rank 0] step:6861/10000 train_time:293076ms step_avg:42.72ms
+[2025-09-05 20:14:34] [Rank 0] step:6881/10000 train_time:293815ms step_avg:42.70ms
+[2025-09-05 20:14:34] [Rank 0] step:6901/10000 train_time:294554ms step_avg:42.68ms
+[2025-09-05 20:14:35] [Rank 0] step:6921/10000 train_time:295292ms step_avg:42.67ms
+[2025-09-05 20:14:36] [Rank 0] step:6941/10000 train_time:296032ms step_avg:42.65ms
+[2025-09-05 20:14:37] [Rank 0] step:6961/10000 train_time:296772ms step_avg:42.63ms
+[2025-09-05 20:14:37] [Rank 0] step:6981/10000 train_time:297510ms step_avg:42.62ms
+[2025-09-05 20:14:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:14:39] [Rank 0] PRINT: step:7000/10000 train_loss:1.6884 val_loss:1.6742 train_time:298330ms step_avg:42.62ms
+[2025-09-05 20:14:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:14:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:16:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:16:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:16:00] [Rank 0] Total Loss: 4.3018
+[2025-09-05 20:16:00] [Rank 0] Total FTA (Unweighted): 0.4600
+[2025-09-05 20:16:00] [Rank 0] Total FTA (Weighted): 0.4600
+[2025-09-05 20:16:00] [Rank 0] Group 0 Loss: 3.5325
+[2025-09-05 20:16:00] [Rank 0] Group 1 Loss: 3.1799
+[2025-09-05 20:16:00] [Rank 0] Group 2 Loss: 3.1323
+[2025-09-05 20:16:00] [Rank 0] Group 3 Loss: 3.5695
+[2025-09-05 20:16:00] [Rank 0] Group 4 Loss: 3.7314
+[2025-09-05 20:16:00] [Rank 0] Group 5 Loss: 4.0042
+[2025-09-05 20:16:00] [Rank 0] Group 6 Loss: 4.2155
+[2025-09-05 20:16:00] [Rank 0] Group 7 Loss: 4.3982
+[2025-09-05 20:16:00] [Rank 0] Group 8 Loss: 4.6789
+[2025-09-05 20:16:00] [Rank 0] Group 9 Loss: 4.7915
+[2025-09-05 20:16:00] [Rank 0] Group 10 Loss: 4.9765
+[2025-09-05 20:16:00] [Rank 0] Group 11 Loss: 4.9668
+[2025-09-05 20:16:00] [Rank 0] Group 12 Loss: 4.8990
+[2025-09-05 20:16:00] [Rank 0] Group 13 Loss: 4.9212
+[2025-09-05 20:16:00] [Rank 0] Group 14 Loss: 4.9072
+[2025-09-05 20:16:00] [Rank 0] Group 15 Loss: 4.9236
+[2025-09-05 20:16:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:16:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:16:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:16:00] [Rank 0] Group 3 FTA: 0.6200
+[2025-09-05 20:16:00] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 20:16:00] [Rank 0] Group 5 FTA: 0.5100
+[2025-09-05 20:16:00] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 20:16:00] [Rank 0] Group 7 FTA: 0.3200
+[2025-09-05 20:16:00] [Rank 0] Group 8 FTA: 0.3600
+[2025-09-05 20:16:00] [Rank 0] Group 9 FTA: 0.2600
+[2025-09-05 20:16:01] [Rank 0] Group 10 FTA: 0.3500
+[2025-09-05 20:16:01] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 20:16:01] [Rank 0] Group 12 FTA: 0.2900
+[2025-09-05 20:16:01] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 20:16:01] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 20:16:01] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 20:16:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:16:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:16:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:16:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:16:02] [Rank 0] step:7001/10000 train_time:298340ms step_avg:42.61ms
+[2025-09-05 20:16:03] [Rank 0] step:7021/10000 train_time:299016ms step_avg:42.59ms
+[2025-09-05 20:16:04] [Rank 0] step:7041/10000 train_time:299754ms step_avg:42.57ms
+[2025-09-05 20:16:04] [Rank 0] step:7061/10000 train_time:300492ms step_avg:42.56ms
+[2025-09-05 20:16:05] [Rank 0] step:7081/10000 train_time:301230ms step_avg:42.54ms
+[2025-09-05 20:16:06] [Rank 0] step:7101/10000 train_time:301969ms step_avg:42.52ms
+[2025-09-05 20:16:06] [Rank 0] step:7121/10000 train_time:302708ms step_avg:42.51ms
+[2025-09-05 20:16:07] [Rank 0] step:7141/10000 train_time:303446ms step_avg:42.49ms
+[2025-09-05 20:16:08] [Rank 0] step:7161/10000 train_time:304184ms step_avg:42.48ms
+[2025-09-05 20:16:09] [Rank 0] step:7181/10000 train_time:304923ms step_avg:42.46ms
+[2025-09-05 20:16:09] [Rank 0] step:7201/10000 train_time:305660ms step_avg:42.45ms
+[2025-09-05 20:16:10] [Rank 0] step:7221/10000 train_time:306398ms step_avg:42.43ms
+[2025-09-05 20:16:11] [Rank 0] step:7241/10000 train_time:307136ms step_avg:42.42ms
+[2025-09-05 20:16:12] [Rank 0] step:7261/10000 train_time:307874ms step_avg:42.40ms
+[2025-09-05 20:16:12] [Rank 0] step:7281/10000 train_time:308612ms step_avg:42.39ms
+[2025-09-05 20:16:13] [Rank 0] step:7301/10000 train_time:309350ms step_avg:42.37ms
+[2025-09-05 20:16:14] [Rank 0] step:7321/10000 train_time:310089ms step_avg:42.36ms
+[2025-09-05 20:16:15] [Rank 0] step:7341/10000 train_time:310826ms step_avg:42.34ms
+[2025-09-05 20:16:15] [Rank 0] step:7361/10000 train_time:311565ms step_avg:42.33ms
+[2025-09-05 20:16:16] [Rank 0] step:7381/10000 train_time:312304ms step_avg:42.31ms
+[2025-09-05 20:16:17] [Rank 0] step:7401/10000 train_time:313042ms step_avg:42.30ms
+[2025-09-05 20:16:18] [Rank 0] step:7421/10000 train_time:313780ms step_avg:42.28ms
+[2025-09-05 20:16:18] [Rank 0] step:7441/10000 train_time:314519ms step_avg:42.27ms
+[2025-09-05 20:16:19] [Rank 0] step:7461/10000 train_time:315258ms step_avg:42.25ms
+[2025-09-05 20:16:20] [Rank 0] step:7481/10000 train_time:315996ms step_avg:42.24ms
+[2025-09-05 20:16:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:16:21] [Rank 0] PRINT: step:7500/10000 train_loss:1.6786 val_loss:1.6672 train_time:316815ms step_avg:42.24ms
+[2025-09-05 20:16:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 20:16:21] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 20:17:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 20:17:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 20:17:43] [Rank 0] Total Loss: 4.2446
+[2025-09-05 20:17:43] [Rank 0] Total FTA (Unweighted): 0.4681
+[2025-09-05 20:17:43] [Rank 0] Total FTA (Weighted): 0.4681
+[2025-09-05 20:17:43] [Rank 0] Group 0 Loss: 3.4778
+[2025-09-05 20:17:43] [Rank 0] Group 1 Loss: 3.1613
+[2025-09-05 20:17:43] [Rank 0] Group 2 Loss: 3.1114
+[2025-09-05 20:17:43] [Rank 0] Group 3 Loss: 3.5249
+[2025-09-05 20:17:43] [Rank 0] Group 4 Loss: 3.6939
+[2025-09-05 20:17:43] [Rank 0] Group 5 Loss: 3.9592
+[2025-09-05 20:17:43] [Rank 0] Group 6 Loss: 4.1414
+[2025-09-05 20:17:43] [Rank 0] Group 7 Loss: 4.3409
+[2025-09-05 20:17:43] [Rank 0] Group 8 Loss: 4.6099
+[2025-09-05 20:17:43] [Rank 0] Group 9 Loss: 4.7258
+[2025-09-05 20:17:43] [Rank 0] Group 10 Loss: 4.8808
+[2025-09-05 20:17:43] [Rank 0] Group 11 Loss: 4.9074
+[2025-09-05 20:17:43] [Rank 0] Group 12 Loss: 4.8323
+[2025-09-05 20:17:43] [Rank 0] Group 13 Loss: 4.8554
+[2025-09-05 20:17:43] [Rank 0] Group 14 Loss: 4.8505
+[2025-09-05 20:17:43] [Rank 0] Group 15 Loss: 4.8408
+[2025-09-05 20:17:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 20:17:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 20:17:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 20:17:43] [Rank 0] Group 3 FTA: 0.7200
+[2025-09-05 20:17:43] [Rank 0] Group 4 FTA: 0.5000
+[2025-09-05 20:17:43] [Rank 0] Group 5 FTA: 0.5100
+[2025-09-05 20:17:43] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 20:17:43] [Rank 0] Group 7 FTA: 0.3200
+[2025-09-05 20:17:43] [Rank 0] Group 8 FTA: 0.3400
+[2025-09-05 20:17:43] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 20:17:43] [Rank 0] Group 10 FTA: 0.3500
+[2025-09-05 20:17:43] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 20:17:43] [Rank 0] Group 12 FTA: 0.2500
+[2025-09-05 20:17:43] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 20:17:43] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 20:17:43] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 20:17:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png
+[2025-09-05 20:17:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png
+[2025-09-05 20:17:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png
+[2025-09-05 20:17:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png
+[2025-09-05 20:17:44] [Rank 0] step:7501/10000 train_time:316826ms step_avg:42.24ms
+[2025-09-05 20:17:45] [Rank 0] step:7521/10000 train_time:317506ms step_avg:42.22ms
+[2025-09-05 20:17:46] [Rank 0] step:7541/10000 train_time:318245ms step_avg:42.20ms
+[2025-09-05 20:17:47] [Rank 0] step:7561/10000 train_time:318983ms step_avg:42.19ms
+[2025-09-05 20:17:47] [Rank 0] step:7581/10000 train_time:319721ms step_avg:42.17ms
+[2025-09-05 20:17:48] [Rank 0] step:7601/10000 train_time:320461ms step_avg:42.16ms
+[2025-09-05 20:17:49] [Rank 0] step:7621/10000 train_time:321199ms step_avg:42.15ms
+[2025-09-05 20:17:50] [Rank 0] step:7641/10000 train_time:322162ms step_avg:42.16ms
+[2025-09-05 20:17:51] [Rank 0] step:7661/10000 train_time:323294ms step_avg:42.20ms
+[2025-09-05 20:17:52] [Rank 0] step:7681/10000 train_time:324032ms step_avg:42.19ms
+[2025-09-05 20:17:52] [Rank 0] step:7701/10000 train_time:324770ms step_avg:42.17ms
+[2025-09-05 20:17:53] [Rank 0] step:7721/10000 train_time:325509ms step_avg:42.16ms
+[2025-09-05 20:17:54] [Rank 0] step:7741/10000 train_time:326248ms step_avg:42.15ms
+[2025-09-05 20:17:55] [Rank 0] step:7761/10000 train_time:326986ms step_avg:42.13ms
+[2025-09-05 20:17:55] [Rank 0] step:7781/10000 train_time:327725ms step_avg:42.12ms
+[2025-09-05 20:17:56] [Rank 0] step:7801/10000 train_time:328465ms step_avg:42.11ms
+[2025-09-05 20:17:57] [Rank 0] step:7821/10000 train_time:329204ms step_avg:42.09ms
+[2025-09-05 20:17:57] [Rank 0] step:7841/10000 train_time:329943ms step_avg:42.08ms
+[2025-09-05 20:17:58] [Rank 0] step:7861/10000 train_time:330682ms step_avg:42.07ms
+[2025-09-05 20:17:59] [Rank 0] step:7881/10000 train_time:331421ms step_avg:42.05ms
+[2025-09-05 20:18:00] [Rank 0] step:7901/10000 train_time:332160ms step_avg:42.04ms
+[2025-09-05 20:18:00] [Rank 0] step:7921/10000 train_time:332899ms step_avg:42.03ms
+[2025-09-05 20:18:01] [Rank 0] step:7941/10000 train_time:333637ms step_avg:42.01ms
+[2025-09-05 20:18:02] [Rank 0] step:7961/10000 train_time:334377ms step_avg:42.00ms
+[2025-09-05 20:18:03] [Rank 0] step:7981/10000 train_time:335116ms step_avg:41.99ms
+[2025-09-05 20:18:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 20:18:04] [Rank 0] PRINT: step:8000/10000 train_loss:1.6735 val_loss:1.6632 train_time:335935ms step_avg:41.99ms +[2025-09-05 20:18:04] [Rank 0] PRINT: step:8000/10000 train_loss:1.6735 val_loss:1.6632 train_time:335935ms step_avg:41.99ms +[2025-09-05 20:18:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:18:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:18:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:18:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:19:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:19:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:19:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:19:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:19:26] [Rank 0] Total Loss: 4.2717 +[2025-09-05 20:19:26] [Rank 0] Total Loss: 4.2717 +[2025-09-05 20:19:26] [Rank 0] Total FTA (Unweighted): 0.4838 +[2025-09-05 20:19:26] [Rank 0] Total FTA (Unweighted): 0.4838 +[2025-09-05 20:19:26] [Rank 0] Total FTA (Weighted): 0.4838 +[2025-09-05 20:19:26] [Rank 0] Total FTA (Weighted): 0.4838 +[2025-09-05 20:19:26] [Rank 0] Group 0 Loss: 3.4779 +[2025-09-05 20:19:26] [Rank 0] Group 0 Loss: 3.4779 +[2025-09-05 20:19:26] [Rank 0] Group 1 Loss: 3.1685 +[2025-09-05 20:19:26] [Rank 0] Group 1 Loss: 3.1685 +[2025-09-05 20:19:26] [Rank 0] Group 2 Loss: 3.1605 +[2025-09-05 20:19:26] [Rank 0] Group 2 Loss: 3.1605 +[2025-09-05 20:19:26] [Rank 0] Group 3 Loss: 3.5950 +[2025-09-05 20:19:26] [Rank 0] Group 3 Loss: 3.5950 +[2025-09-05 20:19:26] [Rank 0] Group 4 Loss: 3.7083 +[2025-09-05 20:19:26] [Rank 0] Group 4 Loss: 3.7083 +[2025-09-05 20:19:26] [Rank 0] Group 5 Loss: 3.9891 +[2025-09-05 20:19:26] [Rank 0] Group 5 Loss: 3.9891 +[2025-09-05 20:19:26] [Rank 0] Group 6 Loss: 4.1577 +[2025-09-05 20:19:26] [Rank 0] Group 6 Loss: 4.1577 +[2025-09-05 20:19:26] [Rank 0] Group 7 Loss: 4.3680 +[2025-09-05 20:19:26] [Rank 0] Group 7 Loss: 4.3680 +[2025-09-05 20:19:26] [Rank 0] Group 8 Loss: 4.6271 +[2025-09-05 20:19:26] [Rank 0] Group 8 Loss: 4.6271 +[2025-09-05 20:19:26] [Rank 0] Group 9 Loss: 4.7584 +[2025-09-05 20:19:26] [Rank 0] Group 9 Loss: 4.7584 +[2025-09-05 20:19:26] [Rank 0] Group 10 Loss: 4.9274 +[2025-09-05 20:19:26] [Rank 0] Group 10 Loss: 4.9274 +[2025-09-05 20:19:26] [Rank 0] Group 11 Loss: 4.9376 +[2025-09-05 20:19:26] [Rank 0] Group 11 Loss: 4.9376 +[2025-09-05 20:19:26] [Rank 0] Group 12 Loss: 4.8497 +[2025-09-05 20:19:26] [Rank 0] Group 12 Loss: 4.8497 +[2025-09-05 20:19:26] [Rank 0] Group 13 Loss: 4.8829 +[2025-09-05 20:19:26] [Rank 0] Group 13 Loss: 4.8829 +[2025-09-05 20:19:26] [Rank 0] Group 14 Loss: 4.8670 +[2025-09-05 20:19:26] [Rank 0] Group 14 Loss: 4.8670 +[2025-09-05 20:19:26] [Rank 0] Group 15 Loss: 4.8713 +[2025-09-05 20:19:26] [Rank 0] Group 15 Loss: 4.8713 +[2025-09-05 20:19:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:19:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:19:26] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:19:26] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:19:26] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:19:26] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:19:26] [Rank 0] Group 3 FTA: 0.8900 +[2025-09-05 20:19:26] [Rank 0] Group 3 FTA: 0.8900 +[2025-09-05 20:19:26] [Rank 0] Group 4 FTA: 0.4800 +[2025-09-05 20:19:26] [Rank 0] Group 4 FTA: 0.4800 +[2025-09-05 20:19:26] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 20:19:26] [Rank 0] Group 5 FTA: 
0.5100 +[2025-09-05 20:19:26] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 20:19:26] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 20:19:26] [Rank 0] Group 7 FTA: 0.3500 +[2025-09-05 20:19:26] [Rank 0] Group 7 FTA: 0.3500 +[2025-09-05 20:19:26] [Rank 0] Group 8 FTA: 0.3500 +[2025-09-05 20:19:26] [Rank 0] Group 8 FTA: 0.3500 +[2025-09-05 20:19:26] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 20:19:26] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 20:19:26] [Rank 0] Group 10 FTA: 0.3500 +[2025-09-05 20:19:26] [Rank 0] Group 10 FTA: 0.3500 +[2025-09-05 20:19:26] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 20:19:26] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 20:19:26] [Rank 0] Group 12 FTA: 0.2500 +[2025-09-05 20:19:26] [Rank 0] Group 12 FTA: 0.2500 +[2025-09-05 20:19:26] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 20:19:26] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 20:19:26] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 20:19:26] [Rank 0] Group 14 FTA: 0.1900 +[2025-09-05 20:19:26] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 20:19:26] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 20:19:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:19:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:19:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:19:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:19:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:19:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:19:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:19:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:19:27] [Rank 0] step:8001/10000 train_time:335946ms step_avg:41.99ms +[2025-09-05 20:19:27] [Rank 0] step:8001/10000 train_time:335946ms step_avg:41.99ms +[2025-09-05 20:19:29] [Rank 0] step:8021/10000 train_time:337238ms step_avg:42.04ms +[2025-09-05 20:19:29] [Rank 0] step:8021/10000 train_time:337238ms step_avg:42.04ms +[2025-09-05 20:19:29] [Rank 0] step:8041/10000 train_time:337976ms step_avg:42.03ms +[2025-09-05 20:19:29] [Rank 0] step:8041/10000 train_time:337976ms step_avg:42.03ms +[2025-09-05 20:19:30] [Rank 0] step:8061/10000 train_time:338714ms step_avg:42.02ms +[2025-09-05 20:19:30] [Rank 0] step:8061/10000 train_time:338714ms step_avg:42.02ms +[2025-09-05 20:19:31] [Rank 0] step:8081/10000 train_time:339451ms step_avg:42.01ms +[2025-09-05 20:19:31] [Rank 0] step:8081/10000 train_time:339451ms step_avg:42.01ms +[2025-09-05 20:19:31] [Rank 0] step:8101/10000 train_time:340189ms step_avg:41.99ms +[2025-09-05 20:19:31] [Rank 0] step:8101/10000 train_time:340189ms step_avg:41.99ms +[2025-09-05 20:19:32] [Rank 0] step:8121/10000 train_time:340927ms step_avg:41.98ms +[2025-09-05 20:19:32] 
[Rank 0] step:8121/10000 train_time:340927ms step_avg:41.98ms +[2025-09-05 20:19:33] [Rank 0] step:8141/10000 train_time:341665ms step_avg:41.97ms +[2025-09-05 20:19:33] [Rank 0] step:8141/10000 train_time:341665ms step_avg:41.97ms +[2025-09-05 20:19:34] [Rank 0] step:8161/10000 train_time:342404ms step_avg:41.96ms +[2025-09-05 20:19:34] [Rank 0] step:8161/10000 train_time:342404ms step_avg:41.96ms +[2025-09-05 20:19:34] [Rank 0] step:8181/10000 train_time:343141ms step_avg:41.94ms +[2025-09-05 20:19:34] [Rank 0] step:8181/10000 train_time:343141ms step_avg:41.94ms +[2025-09-05 20:19:35] [Rank 0] step:8201/10000 train_time:343879ms step_avg:41.93ms +[2025-09-05 20:19:35] [Rank 0] step:8201/10000 train_time:343879ms step_avg:41.93ms +[2025-09-05 20:19:36] [Rank 0] step:8221/10000 train_time:344618ms step_avg:41.92ms +[2025-09-05 20:19:36] [Rank 0] step:8221/10000 train_time:344618ms step_avg:41.92ms +[2025-09-05 20:19:37] [Rank 0] step:8241/10000 train_time:345357ms step_avg:41.91ms +[2025-09-05 20:19:37] [Rank 0] step:8241/10000 train_time:345357ms step_avg:41.91ms +[2025-09-05 20:19:38] [Rank 0] step:8261/10000 train_time:346096ms step_avg:41.90ms +[2025-09-05 20:19:38] [Rank 0] step:8261/10000 train_time:346096ms step_avg:41.90ms +[2025-09-05 20:19:38] [Rank 0] step:8281/10000 train_time:346979ms step_avg:41.90ms +[2025-09-05 20:19:38] [Rank 0] step:8281/10000 train_time:346979ms step_avg:41.90ms +[2025-09-05 20:19:39] [Rank 0] step:8301/10000 train_time:347718ms step_avg:41.89ms +[2025-09-05 20:19:39] [Rank 0] step:8301/10000 train_time:347718ms step_avg:41.89ms +[2025-09-05 20:19:40] [Rank 0] step:8321/10000 train_time:348457ms step_avg:41.88ms +[2025-09-05 20:19:40] [Rank 0] step:8321/10000 train_time:348457ms step_avg:41.88ms +[2025-09-05 20:19:41] [Rank 0] step:8341/10000 train_time:349293ms step_avg:41.88ms +[2025-09-05 20:19:41] [Rank 0] step:8341/10000 train_time:349293ms step_avg:41.88ms +[2025-09-05 20:19:41] [Rank 0] step:8361/10000 train_time:350032ms step_avg:41.86ms +[2025-09-05 20:19:41] [Rank 0] step:8361/10000 train_time:350032ms step_avg:41.86ms +[2025-09-05 20:19:42] [Rank 0] step:8381/10000 train_time:350770ms step_avg:41.85ms +[2025-09-05 20:19:42] [Rank 0] step:8381/10000 train_time:350770ms step_avg:41.85ms +[2025-09-05 20:19:43] [Rank 0] step:8401/10000 train_time:351509ms step_avg:41.84ms +[2025-09-05 20:19:43] [Rank 0] step:8401/10000 train_time:351509ms step_avg:41.84ms +[2025-09-05 20:19:44] [Rank 0] step:8421/10000 train_time:352249ms step_avg:41.83ms +[2025-09-05 20:19:44] [Rank 0] step:8421/10000 train_time:352249ms step_avg:41.83ms +[2025-09-05 20:19:44] [Rank 0] step:8441/10000 train_time:352988ms step_avg:41.82ms +[2025-09-05 20:19:44] [Rank 0] step:8441/10000 train_time:352988ms step_avg:41.82ms +[2025-09-05 20:19:45] [Rank 0] step:8461/10000 train_time:353727ms step_avg:41.81ms +[2025-09-05 20:19:45] [Rank 0] step:8461/10000 train_time:353727ms step_avg:41.81ms +[2025-09-05 20:19:46] [Rank 0] step:8481/10000 train_time:354466ms step_avg:41.80ms +[2025-09-05 20:19:46] [Rank 0] step:8481/10000 train_time:354466ms step_avg:41.80ms +[2025-09-05 20:19:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 20:19:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
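The warning above is pure integer arithmetic: with val_tokens = 491520 and val_batch_size = 65536 the ratio is 7.5, so a floor-division validation loop runs 7 full batches and never touches the last half batch. A minimal sketch of that bookkeeping (assuming the loop floors the batch count; the validation loop itself is not shown in this excerpt):

val_tokens = 491520          # value from the warning line above
val_batch_size = 65536
full_batches = val_tokens // val_batch_size   # 7
evaluated = full_batches * val_batch_size     # 458752 tokens actually scored
missed = val_tokens - evaluated               # 32768 tokens (half a batch) skipped
print(full_batches, evaluated, missed)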
+[2025-09-05 20:19:47] [Rank 0] PRINT: step:8500/10000 train_loss:1.6675 val_loss:1.6559 train_time:355284ms step_avg:41.80ms +[2025-09-05 20:19:47] [Rank 0] PRINT: step:8500/10000 train_loss:1.6675 val_loss:1.6559 train_time:355284ms step_avg:41.80ms +[2025-09-05 20:19:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:19:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:19:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:19:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:21:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:21:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:21:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:21:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:21:09] [Rank 0] Total Loss: 4.2473 +[2025-09-05 20:21:09] [Rank 0] Total Loss: 4.2473 +[2025-09-05 20:21:09] [Rank 0] Total FTA (Unweighted): 0.4906 +[2025-09-05 20:21:09] [Rank 0] Total FTA (Unweighted): 0.4906 +[2025-09-05 20:21:09] [Rank 0] Total FTA (Weighted): 0.4906 +[2025-09-05 20:21:09] [Rank 0] Total FTA (Weighted): 0.4906 +[2025-09-05 20:21:09] [Rank 0] Group 0 Loss: 3.4418 +[2025-09-05 20:21:09] [Rank 0] Group 0 Loss: 3.4418 +[2025-09-05 20:21:09] [Rank 0] Group 1 Loss: 3.1887 +[2025-09-05 20:21:09] [Rank 0] Group 1 Loss: 3.1887 +[2025-09-05 20:21:09] [Rank 0] Group 2 Loss: 3.1003 +[2025-09-05 20:21:09] [Rank 0] Group 2 Loss: 3.1003 +[2025-09-05 20:21:09] [Rank 0] Group 3 Loss: 3.5614 +[2025-09-05 20:21:09] [Rank 0] Group 3 Loss: 3.5614 +[2025-09-05 20:21:09] [Rank 0] Group 4 Loss: 3.6855 +[2025-09-05 20:21:09] [Rank 0] Group 4 Loss: 3.6855 +[2025-09-05 20:21:09] [Rank 0] Group 5 Loss: 3.9712 +[2025-09-05 20:21:09] [Rank 0] Group 5 Loss: 3.9712 +[2025-09-05 20:21:09] [Rank 0] Group 6 Loss: 4.1320 +[2025-09-05 20:21:09] [Rank 0] Group 6 Loss: 4.1320 +[2025-09-05 20:21:09] [Rank 0] Group 7 Loss: 4.3372 +[2025-09-05 20:21:09] [Rank 0] Group 7 Loss: 4.3372 +[2025-09-05 20:21:09] [Rank 0] Group 8 Loss: 4.5983 +[2025-09-05 20:21:09] [Rank 0] Group 8 Loss: 4.5983 +[2025-09-05 20:21:09] [Rank 0] Group 9 Loss: 4.7320 +[2025-09-05 20:21:09] [Rank 0] Group 9 Loss: 4.7320 +[2025-09-05 20:21:09] [Rank 0] Group 10 Loss: 4.8989 +[2025-09-05 20:21:09] [Rank 0] Group 10 Loss: 4.8989 +[2025-09-05 20:21:09] [Rank 0] Group 11 Loss: 4.8942 +[2025-09-05 20:21:09] [Rank 0] Group 11 Loss: 4.8942 +[2025-09-05 20:21:09] [Rank 0] Group 12 Loss: 4.8568 +[2025-09-05 20:21:09] [Rank 0] Group 12 Loss: 4.8568 +[2025-09-05 20:21:09] [Rank 0] Group 13 Loss: 4.8586 +[2025-09-05 20:21:09] [Rank 0] Group 13 Loss: 4.8586 +[2025-09-05 20:21:09] [Rank 0] Group 14 Loss: 4.8586 +[2025-09-05 20:21:09] [Rank 0] Group 14 Loss: 4.8586 +[2025-09-05 20:21:09] [Rank 0] Group 15 Loss: 4.8419 +[2025-09-05 20:21:09] [Rank 0] Group 15 Loss: 4.8419 +[2025-09-05 20:21:09] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:21:09] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:21:09] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:21:09] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:21:09] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:21:09] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:21:09] [Rank 0] Group 3 FTA: 0.8700 +[2025-09-05 20:21:09] [Rank 0] Group 3 FTA: 0.8700 +[2025-09-05 20:21:09] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 20:21:09] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 20:21:09] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 20:21:09] [Rank 0] Group 5 FTA: 
0.5100 +[2025-09-05 20:21:09] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 20:21:09] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 20:21:09] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 20:21:09] [Rank 0] Group 7 FTA: 0.3400 +[2025-09-05 20:21:09] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 20:21:09] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 20:21:09] [Rank 0] Group 9 FTA: 0.2600 +[2025-09-05 20:21:09] [Rank 0] Group 9 FTA: 0.2600 +[2025-09-05 20:21:10] [Rank 0] Group 10 FTA: 0.3600 +[2025-09-05 20:21:10] [Rank 0] Group 10 FTA: 0.3600 +[2025-09-05 20:21:10] [Rank 0] Group 11 FTA: 0.3300 +[2025-09-05 20:21:10] [Rank 0] Group 11 FTA: 0.3300 +[2025-09-05 20:21:10] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 20:21:10] [Rank 0] Group 12 FTA: 0.3000 +[2025-09-05 20:21:10] [Rank 0] Group 13 FTA: 0.2900 +[2025-09-05 20:21:10] [Rank 0] Group 13 FTA: 0.2900 +[2025-09-05 20:21:10] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 20:21:10] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 20:21:10] [Rank 0] Group 15 FTA: 0.1600 +[2025-09-05 20:21:10] [Rank 0] Group 15 FTA: 0.1600 +[2025-09-05 20:21:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:21:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:21:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:21:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:21:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:21:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:21:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:21:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:21:11] [Rank 0] step:8501/10000 train_time:355294ms step_avg:41.79ms +[2025-09-05 20:21:11] [Rank 0] step:8501/10000 train_time:355294ms step_avg:41.79ms +[2025-09-05 20:21:12] [Rank 0] step:8521/10000 train_time:355971ms step_avg:41.78ms +[2025-09-05 20:21:12] [Rank 0] step:8521/10000 train_time:355971ms step_avg:41.78ms +[2025-09-05 20:21:12] [Rank 0] step:8541/10000 train_time:356710ms step_avg:41.76ms +[2025-09-05 20:21:12] [Rank 0] step:8541/10000 train_time:356710ms step_avg:41.76ms +[2025-09-05 20:21:13] [Rank 0] step:8561/10000 train_time:357449ms step_avg:41.75ms +[2025-09-05 20:21:13] [Rank 0] step:8561/10000 train_time:357449ms step_avg:41.75ms +[2025-09-05 20:21:14] [Rank 0] step:8581/10000 train_time:358187ms step_avg:41.74ms +[2025-09-05 20:21:14] [Rank 0] step:8581/10000 train_time:358187ms step_avg:41.74ms +[2025-09-05 20:21:15] [Rank 0] step:8601/10000 train_time:358925ms step_avg:41.73ms +[2025-09-05 20:21:15] [Rank 0] step:8601/10000 train_time:358925ms step_avg:41.73ms +[2025-09-05 20:21:15] [Rank 0] step:8621/10000 train_time:359664ms step_avg:41.72ms +[2025-09-05 20:21:15] 
[Rank 0] step:8621/10000 train_time:359664ms step_avg:41.72ms +[2025-09-05 20:21:16] [Rank 0] step:8641/10000 train_time:360403ms step_avg:41.71ms +[2025-09-05 20:21:16] [Rank 0] step:8641/10000 train_time:360403ms step_avg:41.71ms +[2025-09-05 20:21:17] [Rank 0] step:8661/10000 train_time:361141ms step_avg:41.70ms +[2025-09-05 20:21:17] [Rank 0] step:8661/10000 train_time:361141ms step_avg:41.70ms +[2025-09-05 20:21:18] [Rank 0] step:8681/10000 train_time:361880ms step_avg:41.69ms +[2025-09-05 20:21:18] [Rank 0] step:8681/10000 train_time:361880ms step_avg:41.69ms +[2025-09-05 20:21:18] [Rank 0] step:8701/10000 train_time:362619ms step_avg:41.68ms +[2025-09-05 20:21:18] [Rank 0] step:8701/10000 train_time:362619ms step_avg:41.68ms +[2025-09-05 20:21:19] [Rank 0] step:8721/10000 train_time:363358ms step_avg:41.66ms +[2025-09-05 20:21:19] [Rank 0] step:8721/10000 train_time:363358ms step_avg:41.66ms +[2025-09-05 20:21:20] [Rank 0] step:8741/10000 train_time:364096ms step_avg:41.65ms +[2025-09-05 20:21:20] [Rank 0] step:8741/10000 train_time:364096ms step_avg:41.65ms +[2025-09-05 20:21:21] [Rank 0] step:8761/10000 train_time:364834ms step_avg:41.64ms +[2025-09-05 20:21:21] [Rank 0] step:8761/10000 train_time:364834ms step_avg:41.64ms +[2025-09-05 20:21:21] [Rank 0] step:8781/10000 train_time:365573ms step_avg:41.63ms +[2025-09-05 20:21:21] [Rank 0] step:8781/10000 train_time:365573ms step_avg:41.63ms +[2025-09-05 20:21:22] [Rank 0] step:8801/10000 train_time:366312ms step_avg:41.62ms +[2025-09-05 20:21:22] [Rank 0] step:8801/10000 train_time:366312ms step_avg:41.62ms +[2025-09-05 20:21:23] [Rank 0] step:8821/10000 train_time:367051ms step_avg:41.61ms +[2025-09-05 20:21:23] [Rank 0] step:8821/10000 train_time:367051ms step_avg:41.61ms +[2025-09-05 20:21:24] [Rank 0] step:8841/10000 train_time:368407ms step_avg:41.67ms +[2025-09-05 20:21:24] [Rank 0] step:8841/10000 train_time:368407ms step_avg:41.67ms +[2025-09-05 20:21:25] [Rank 0] step:8861/10000 train_time:369147ms step_avg:41.66ms +[2025-09-05 20:21:25] [Rank 0] step:8861/10000 train_time:369147ms step_avg:41.66ms +[2025-09-05 20:21:26] [Rank 0] step:8881/10000 train_time:369885ms step_avg:41.65ms +[2025-09-05 20:21:26] [Rank 0] step:8881/10000 train_time:369885ms step_avg:41.65ms +[2025-09-05 20:21:26] [Rank 0] step:8901/10000 train_time:370624ms step_avg:41.64ms +[2025-09-05 20:21:26] [Rank 0] step:8901/10000 train_time:370624ms step_avg:41.64ms +[2025-09-05 20:21:27] [Rank 0] step:8921/10000 train_time:371363ms step_avg:41.63ms +[2025-09-05 20:21:27] [Rank 0] step:8921/10000 train_time:371363ms step_avg:41.63ms +[2025-09-05 20:21:28] [Rank 0] step:8941/10000 train_time:372101ms step_avg:41.62ms +[2025-09-05 20:21:28] [Rank 0] step:8941/10000 train_time:372101ms step_avg:41.62ms +[2025-09-05 20:21:29] [Rank 0] step:8961/10000 train_time:372840ms step_avg:41.61ms +[2025-09-05 20:21:29] [Rank 0] step:8961/10000 train_time:372840ms step_avg:41.61ms +[2025-09-05 20:21:29] [Rank 0] step:8981/10000 train_time:373579ms step_avg:41.60ms +[2025-09-05 20:21:29] [Rank 0] step:8981/10000 train_time:373579ms step_avg:41.60ms +[2025-09-05 20:21:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 20:21:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
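The step lines carry cumulative wall-clock train_time, and the printed step_avg is consistent with train_time divided by the current step number. A small sketch (a hypothetical parser, not part of the logged script) that re-derives step_avg from one of the lines above:

import re

line = "step:8641/10000 train_time:360403ms step_avg:41.71ms"  # copied from the log above
m = re.search(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms", line)
step, train_ms, logged_avg = int(m.group(1)), int(m.group(2)), float(m.group(3))
recomputed = train_ms / step          # 360403 / 8641 ~= 41.71 ms per step
assert abs(recomputed - logged_avg) < 0.01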
+[2025-09-05 20:21:31] [Rank 0] PRINT: step:9000/10000 train_loss:1.6608 val_loss:1.6496 train_time:374397ms step_avg:41.60ms +[2025-09-05 20:21:31] [Rank 0] PRINT: step:9000/10000 train_loss:1.6608 val_loss:1.6496 train_time:374397ms step_avg:41.60ms +[2025-09-05 20:21:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:21:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:21:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:21:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:22:52] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:22:52] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:22:52] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:22:52] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:22:52] [Rank 0] Total Loss: 4.2546 +[2025-09-05 20:22:52] [Rank 0] Total Loss: 4.2546 +[2025-09-05 20:22:52] [Rank 0] Total FTA (Unweighted): 0.4962 +[2025-09-05 20:22:52] [Rank 0] Total FTA (Unweighted): 0.4962 +[2025-09-05 20:22:52] [Rank 0] Total FTA (Weighted): 0.4963 +[2025-09-05 20:22:52] [Rank 0] Total FTA (Weighted): 0.4963 +[2025-09-05 20:22:52] [Rank 0] Group 0 Loss: 3.5023 +[2025-09-05 20:22:52] [Rank 0] Group 0 Loss: 3.5023 +[2025-09-05 20:22:52] [Rank 0] Group 1 Loss: 3.1525 +[2025-09-05 20:22:52] [Rank 0] Group 1 Loss: 3.1525 +[2025-09-05 20:22:52] [Rank 0] Group 2 Loss: 3.0949 +[2025-09-05 20:22:52] [Rank 0] Group 2 Loss: 3.0949 +[2025-09-05 20:22:52] [Rank 0] Group 3 Loss: 3.5620 +[2025-09-05 20:22:52] [Rank 0] Group 3 Loss: 3.5620 +[2025-09-05 20:22:52] [Rank 0] Group 4 Loss: 3.6885 +[2025-09-05 20:22:52] [Rank 0] Group 4 Loss: 3.6885 +[2025-09-05 20:22:52] [Rank 0] Group 5 Loss: 3.9874 +[2025-09-05 20:22:52] [Rank 0] Group 5 Loss: 3.9874 +[2025-09-05 20:22:52] [Rank 0] Group 6 Loss: 4.1688 +[2025-09-05 20:22:52] [Rank 0] Group 6 Loss: 4.1688 +[2025-09-05 20:22:52] [Rank 0] Group 7 Loss: 4.3574 +[2025-09-05 20:22:52] [Rank 0] Group 7 Loss: 4.3574 +[2025-09-05 20:22:52] [Rank 0] Group 8 Loss: 4.6052 +[2025-09-05 20:22:52] [Rank 0] Group 8 Loss: 4.6052 +[2025-09-05 20:22:52] [Rank 0] Group 9 Loss: 4.7534 +[2025-09-05 20:22:52] [Rank 0] Group 9 Loss: 4.7534 +[2025-09-05 20:22:52] [Rank 0] Group 10 Loss: 4.8972 +[2025-09-05 20:22:52] [Rank 0] Group 10 Loss: 4.8972 +[2025-09-05 20:22:52] [Rank 0] Group 11 Loss: 4.8990 +[2025-09-05 20:22:52] [Rank 0] Group 11 Loss: 4.8990 +[2025-09-05 20:22:52] [Rank 0] Group 12 Loss: 4.8553 +[2025-09-05 20:22:52] [Rank 0] Group 12 Loss: 4.8553 +[2025-09-05 20:22:52] [Rank 0] Group 13 Loss: 4.8698 +[2025-09-05 20:22:52] [Rank 0] Group 13 Loss: 4.8698 +[2025-09-05 20:22:52] [Rank 0] Group 14 Loss: 4.8593 +[2025-09-05 20:22:52] [Rank 0] Group 14 Loss: 4.8593 +[2025-09-05 20:22:52] [Rank 0] Group 15 Loss: 4.8200 +[2025-09-05 20:22:52] [Rank 0] Group 15 Loss: 4.8200 +[2025-09-05 20:22:52] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:22:52] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:22:52] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:22:52] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:22:52] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:22:52] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:22:52] [Rank 0] Group 3 FTA: 0.8900 +[2025-09-05 20:22:52] [Rank 0] Group 3 FTA: 0.8900 +[2025-09-05 20:22:52] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 20:22:52] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 20:22:52] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 20:22:52] [Rank 0] Group 5 FTA: 
0.5100 +[2025-09-05 20:22:52] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 20:22:52] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 20:22:52] [Rank 0] Group 7 FTA: 0.3500 +[2025-09-05 20:22:52] [Rank 0] Group 7 FTA: 0.3500 +[2025-09-05 20:22:52] [Rank 0] Group 8 FTA: 0.3700 +[2025-09-05 20:22:52] [Rank 0] Group 8 FTA: 0.3700 +[2025-09-05 20:22:52] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 20:22:52] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 20:22:52] [Rank 0] Group 10 FTA: 0.3800 +[2025-09-05 20:22:52] [Rank 0] Group 10 FTA: 0.3800 +[2025-09-05 20:22:52] [Rank 0] Group 11 FTA: 0.3400 +[2025-09-05 20:22:52] [Rank 0] Group 11 FTA: 0.3400 +[2025-09-05 20:22:52] [Rank 0] Group 12 FTA: 0.2700 +[2025-09-05 20:22:52] [Rank 0] Group 12 FTA: 0.2700 +[2025-09-05 20:22:52] [Rank 0] Group 13 FTA: 0.3200 +[2025-09-05 20:22:52] [Rank 0] Group 13 FTA: 0.3200 +[2025-09-05 20:22:52] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 20:22:52] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 20:22:53] [Rank 0] Group 15 FTA: 0.1600 +[2025-09-05 20:22:53] [Rank 0] Group 15 FTA: 0.1600 +[2025-09-05 20:22:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:22:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:22:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:22:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:22:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:22:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:22:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:22:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:22:54] [Rank 0] step:9001/10000 train_time:374407ms step_avg:41.60ms +[2025-09-05 20:22:54] [Rank 0] step:9001/10000 train_time:374407ms step_avg:41.60ms +[2025-09-05 20:22:55] [Rank 0] step:9021/10000 train_time:375087ms step_avg:41.58ms +[2025-09-05 20:22:55] [Rank 0] step:9021/10000 train_time:375087ms step_avg:41.58ms +[2025-09-05 20:22:55] [Rank 0] step:9041/10000 train_time:375825ms step_avg:41.57ms +[2025-09-05 20:22:55] [Rank 0] step:9041/10000 train_time:375825ms step_avg:41.57ms +[2025-09-05 20:22:56] [Rank 0] step:9061/10000 train_time:376564ms step_avg:41.56ms +[2025-09-05 20:22:56] [Rank 0] step:9061/10000 train_time:376564ms step_avg:41.56ms +[2025-09-05 20:22:57] [Rank 0] step:9081/10000 train_time:377302ms step_avg:41.55ms +[2025-09-05 20:22:57] [Rank 0] step:9081/10000 train_time:377302ms step_avg:41.55ms +[2025-09-05 20:22:58] [Rank 0] step:9101/10000 train_time:378041ms step_avg:41.54ms +[2025-09-05 20:22:58] [Rank 0] step:9101/10000 train_time:378041ms step_avg:41.54ms +[2025-09-05 20:22:58] [Rank 0] step:9121/10000 train_time:378779ms step_avg:41.53ms +[2025-09-05 20:22:58] 
[Rank 0] step:9121/10000 train_time:378779ms step_avg:41.53ms +[2025-09-05 20:22:59] [Rank 0] step:9141/10000 train_time:379518ms step_avg:41.52ms +[2025-09-05 20:22:59] [Rank 0] step:9141/10000 train_time:379518ms step_avg:41.52ms +[2025-09-05 20:23:00] [Rank 0] step:9161/10000 train_time:380257ms step_avg:41.51ms +[2025-09-05 20:23:00] [Rank 0] step:9161/10000 train_time:380257ms step_avg:41.51ms +[2025-09-05 20:23:01] [Rank 0] step:9181/10000 train_time:380997ms step_avg:41.50ms +[2025-09-05 20:23:01] [Rank 0] step:9181/10000 train_time:380997ms step_avg:41.50ms +[2025-09-05 20:23:01] [Rank 0] step:9201/10000 train_time:381735ms step_avg:41.49ms +[2025-09-05 20:23:01] [Rank 0] step:9201/10000 train_time:381735ms step_avg:41.49ms +[2025-09-05 20:23:02] [Rank 0] step:9221/10000 train_time:382476ms step_avg:41.48ms +[2025-09-05 20:23:02] [Rank 0] step:9221/10000 train_time:382476ms step_avg:41.48ms +[2025-09-05 20:23:03] [Rank 0] step:9241/10000 train_time:383215ms step_avg:41.47ms +[2025-09-05 20:23:03] [Rank 0] step:9241/10000 train_time:383215ms step_avg:41.47ms +[2025-09-05 20:23:03] [Rank 0] step:9261/10000 train_time:383954ms step_avg:41.46ms +[2025-09-05 20:23:03] [Rank 0] step:9261/10000 train_time:383954ms step_avg:41.46ms +[2025-09-05 20:23:04] [Rank 0] step:9281/10000 train_time:384692ms step_avg:41.45ms +[2025-09-05 20:23:04] [Rank 0] step:9281/10000 train_time:384692ms step_avg:41.45ms +[2025-09-05 20:23:05] [Rank 0] step:9301/10000 train_time:385431ms step_avg:41.44ms +[2025-09-05 20:23:05] [Rank 0] step:9301/10000 train_time:385431ms step_avg:41.44ms +[2025-09-05 20:23:06] [Rank 0] step:9321/10000 train_time:386171ms step_avg:41.43ms +[2025-09-05 20:23:06] [Rank 0] step:9321/10000 train_time:386171ms step_avg:41.43ms +[2025-09-05 20:23:06] [Rank 0] step:9341/10000 train_time:386910ms step_avg:41.42ms +[2025-09-05 20:23:06] [Rank 0] step:9341/10000 train_time:386910ms step_avg:41.42ms +[2025-09-05 20:23:07] [Rank 0] step:9361/10000 train_time:387649ms step_avg:41.41ms +[2025-09-05 20:23:07] [Rank 0] step:9361/10000 train_time:387649ms step_avg:41.41ms +[2025-09-05 20:23:08] [Rank 0] step:9381/10000 train_time:388387ms step_avg:41.40ms +[2025-09-05 20:23:08] [Rank 0] step:9381/10000 train_time:388387ms step_avg:41.40ms +[2025-09-05 20:23:09] [Rank 0] step:9401/10000 train_time:389125ms step_avg:41.39ms +[2025-09-05 20:23:09] [Rank 0] step:9401/10000 train_time:389125ms step_avg:41.39ms +[2025-09-05 20:23:09] [Rank 0] step:9421/10000 train_time:389864ms step_avg:41.38ms +[2025-09-05 20:23:09] [Rank 0] step:9421/10000 train_time:389864ms step_avg:41.38ms +[2025-09-05 20:23:10] [Rank 0] step:9441/10000 train_time:390602ms step_avg:41.37ms +[2025-09-05 20:23:10] [Rank 0] step:9441/10000 train_time:390602ms step_avg:41.37ms +[2025-09-05 20:23:11] [Rank 0] step:9461/10000 train_time:391340ms step_avg:41.36ms +[2025-09-05 20:23:11] [Rank 0] step:9461/10000 train_time:391340ms step_avg:41.36ms +[2025-09-05 20:23:12] [Rank 0] step:9481/10000 train_time:392079ms step_avg:41.35ms +[2025-09-05 20:23:12] [Rank 0] step:9481/10000 train_time:392079ms step_avg:41.35ms +[2025-09-05 20:23:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 20:23:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
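In each detailed-evaluation block, Total FTA (Unweighted) matches the plain mean of the 16 per-group FTAs, and the weighted total is nearly identical because the fixed eval set holds 100 samples per group (per_group_k=100, 1600 samples over 16 groups). A sketch checking this against the step-9500 numbers reported below, assuming the weighted total is a sample-count-weighted mean; the residual 0.4912 vs 0.4913 gap is presumably rounding in the per-group printout:

group_fta = [1.00, 1.00, 1.00, 0.84, 0.50, 0.51, 0.40, 0.36,
             0.36, 0.27, 0.36, 0.33, 0.28, 0.31, 0.20, 0.14]  # groups 0-15, step 9500
unweighted = sum(group_fta) / len(group_fta)                  # 0.49125 -> logged 0.4912
counts = [100] * 16                                           # per_group_k = 100
weighted = sum(f * n for f, n in zip(group_fta, counts)) / sum(counts)
print(round(unweighted, 4), round(weighted, 4))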
+[2025-09-05 20:23:13] [Rank 0] PRINT: step:9500/10000 train_loss:1.6549 val_loss:1.6449 train_time:392899ms step_avg:41.36ms +[2025-09-05 20:23:13] [Rank 0] PRINT: step:9500/10000 train_loss:1.6549 val_loss:1.6449 train_time:392899ms step_avg:41.36ms +[2025-09-05 20:23:13] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:23:13] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:23:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:23:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:24:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:24:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:24:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:24:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:24:35] [Rank 0] Total Loss: 4.2750 +[2025-09-05 20:24:35] [Rank 0] Total Loss: 4.2750 +[2025-09-05 20:24:35] [Rank 0] Total FTA (Unweighted): 0.4912 +[2025-09-05 20:24:35] [Rank 0] Total FTA (Unweighted): 0.4912 +[2025-09-05 20:24:35] [Rank 0] Total FTA (Weighted): 0.4913 +[2025-09-05 20:24:35] [Rank 0] Total FTA (Weighted): 0.4913 +[2025-09-05 20:24:35] [Rank 0] Group 0 Loss: 3.4745 +[2025-09-05 20:24:35] [Rank 0] Group 0 Loss: 3.4745 +[2025-09-05 20:24:35] [Rank 0] Group 1 Loss: 3.2259 +[2025-09-05 20:24:35] [Rank 0] Group 1 Loss: 3.2259 +[2025-09-05 20:24:35] [Rank 0] Group 2 Loss: 3.1105 +[2025-09-05 20:24:35] [Rank 0] Group 2 Loss: 3.1105 +[2025-09-05 20:24:35] [Rank 0] Group 3 Loss: 3.5839 +[2025-09-05 20:24:35] [Rank 0] Group 3 Loss: 3.5839 +[2025-09-05 20:24:35] [Rank 0] Group 4 Loss: 3.6994 +[2025-09-05 20:24:35] [Rank 0] Group 4 Loss: 3.6994 +[2025-09-05 20:24:35] [Rank 0] Group 5 Loss: 4.0136 +[2025-09-05 20:24:35] [Rank 0] Group 5 Loss: 4.0136 +[2025-09-05 20:24:35] [Rank 0] Group 6 Loss: 4.1844 +[2025-09-05 20:24:35] [Rank 0] Group 6 Loss: 4.1844 +[2025-09-05 20:24:35] [Rank 0] Group 7 Loss: 4.3788 +[2025-09-05 20:24:35] [Rank 0] Group 7 Loss: 4.3788 +[2025-09-05 20:24:35] [Rank 0] Group 8 Loss: 4.6309 +[2025-09-05 20:24:35] [Rank 0] Group 8 Loss: 4.6309 +[2025-09-05 20:24:35] [Rank 0] Group 9 Loss: 4.7665 +[2025-09-05 20:24:35] [Rank 0] Group 9 Loss: 4.7665 +[2025-09-05 20:24:35] [Rank 0] Group 10 Loss: 4.9233 +[2025-09-05 20:24:35] [Rank 0] Group 10 Loss: 4.9233 +[2025-09-05 20:24:35] [Rank 0] Group 11 Loss: 4.9174 +[2025-09-05 20:24:35] [Rank 0] Group 11 Loss: 4.9174 +[2025-09-05 20:24:35] [Rank 0] Group 12 Loss: 4.8759 +[2025-09-05 20:24:35] [Rank 0] Group 12 Loss: 4.8759 +[2025-09-05 20:24:35] [Rank 0] Group 13 Loss: 4.8952 +[2025-09-05 20:24:35] [Rank 0] Group 13 Loss: 4.8952 +[2025-09-05 20:24:35] [Rank 0] Group 14 Loss: 4.8769 +[2025-09-05 20:24:35] [Rank 0] Group 14 Loss: 4.8769 +[2025-09-05 20:24:35] [Rank 0] Group 15 Loss: 4.8424 +[2025-09-05 20:24:35] [Rank 0] Group 15 Loss: 4.8424 +[2025-09-05 20:24:35] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:24:35] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:24:35] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:24:35] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:24:35] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:24:35] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:24:35] [Rank 0] Group 3 FTA: 0.8400 +[2025-09-05 20:24:35] [Rank 0] Group 3 FTA: 0.8400 +[2025-09-05 20:24:35] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 20:24:35] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 20:24:35] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 20:24:35] [Rank 0] Group 5 FTA: 
0.5100 +[2025-09-05 20:24:35] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 20:24:35] [Rank 0] Group 6 FTA: 0.4000 +[2025-09-05 20:24:35] [Rank 0] Group 7 FTA: 0.3600 +[2025-09-05 20:24:35] [Rank 0] Group 7 FTA: 0.3600 +[2025-09-05 20:24:35] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 20:24:35] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 20:24:35] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 20:24:35] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 20:24:35] [Rank 0] Group 10 FTA: 0.3600 +[2025-09-05 20:24:35] [Rank 0] Group 10 FTA: 0.3600 +[2025-09-05 20:24:35] [Rank 0] Group 11 FTA: 0.3300 +[2025-09-05 20:24:35] [Rank 0] Group 11 FTA: 0.3300 +[2025-09-05 20:24:35] [Rank 0] Group 12 FTA: 0.2800 +[2025-09-05 20:24:35] [Rank 0] Group 12 FTA: 0.2800 +[2025-09-05 20:24:36] [Rank 0] Group 13 FTA: 0.3100 +[2025-09-05 20:24:36] [Rank 0] Group 13 FTA: 0.3100 +[2025-09-05 20:24:36] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 20:24:36] [Rank 0] Group 14 FTA: 0.2000 +[2025-09-05 20:24:36] [Rank 0] Group 15 FTA: 0.1400 +[2025-09-05 20:24:36] [Rank 0] Group 15 FTA: 0.1400 +[2025-09-05 20:24:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:24:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:24:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:24:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:24:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:24:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:24:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:24:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:24:37] [Rank 0] step:9501/10000 train_time:392908ms step_avg:41.35ms +[2025-09-05 20:24:37] [Rank 0] step:9501/10000 train_time:392908ms step_avg:41.35ms +[2025-09-05 20:24:38] [Rank 0] step:9521/10000 train_time:393574ms step_avg:41.34ms +[2025-09-05 20:24:38] [Rank 0] step:9521/10000 train_time:393574ms step_avg:41.34ms +[2025-09-05 20:24:38] [Rank 0] step:9541/10000 train_time:394313ms step_avg:41.33ms +[2025-09-05 20:24:38] [Rank 0] step:9541/10000 train_time:394313ms step_avg:41.33ms +[2025-09-05 20:24:39] [Rank 0] step:9561/10000 train_time:395051ms step_avg:41.32ms +[2025-09-05 20:24:39] [Rank 0] step:9561/10000 train_time:395051ms step_avg:41.32ms +[2025-09-05 20:24:40] [Rank 0] step:9581/10000 train_time:395788ms step_avg:41.31ms +[2025-09-05 20:24:40] [Rank 0] step:9581/10000 train_time:395788ms step_avg:41.31ms +[2025-09-05 20:24:41] [Rank 0] step:9601/10000 train_time:396527ms step_avg:41.30ms +[2025-09-05 20:24:41] [Rank 0] step:9601/10000 train_time:396527ms step_avg:41.30ms +[2025-09-05 20:24:41] [Rank 0] step:9621/10000 train_time:397265ms step_avg:41.29ms +[2025-09-05 20:24:41] 
[Rank 0] step:9621/10000 train_time:397265ms step_avg:41.29ms +[2025-09-05 20:24:42] [Rank 0] step:9641/10000 train_time:398004ms step_avg:41.28ms +[2025-09-05 20:24:42] [Rank 0] step:9641/10000 train_time:398004ms step_avg:41.28ms +[2025-09-05 20:24:43] [Rank 0] step:9661/10000 train_time:399017ms step_avg:41.30ms +[2025-09-05 20:24:43] [Rank 0] step:9661/10000 train_time:399017ms step_avg:41.30ms +[2025-09-05 20:24:44] [Rank 0] step:9681/10000 train_time:399756ms step_avg:41.29ms +[2025-09-05 20:24:44] [Rank 0] step:9681/10000 train_time:399756ms step_avg:41.29ms +[2025-09-05 20:24:45] [Rank 0] step:9701/10000 train_time:400497ms step_avg:41.28ms +[2025-09-05 20:24:45] [Rank 0] step:9701/10000 train_time:400497ms step_avg:41.28ms +[2025-09-05 20:24:45] [Rank 0] step:9721/10000 train_time:401236ms step_avg:41.28ms +[2025-09-05 20:24:45] [Rank 0] step:9721/10000 train_time:401236ms step_avg:41.28ms +[2025-09-05 20:24:46] [Rank 0] step:9741/10000 train_time:401974ms step_avg:41.27ms +[2025-09-05 20:24:46] [Rank 0] step:9741/10000 train_time:401974ms step_avg:41.27ms +[2025-09-05 20:24:47] [Rank 0] step:9761/10000 train_time:402712ms step_avg:41.26ms +[2025-09-05 20:24:47] [Rank 0] step:9761/10000 train_time:402712ms step_avg:41.26ms +[2025-09-05 20:24:48] [Rank 0] step:9781/10000 train_time:403450ms step_avg:41.25ms +[2025-09-05 20:24:48] [Rank 0] step:9781/10000 train_time:403450ms step_avg:41.25ms +[2025-09-05 20:24:48] [Rank 0] step:9801/10000 train_time:404190ms step_avg:41.24ms +[2025-09-05 20:24:48] [Rank 0] step:9801/10000 train_time:404190ms step_avg:41.24ms +[2025-09-05 20:24:49] [Rank 0] step:9821/10000 train_time:404928ms step_avg:41.23ms +[2025-09-05 20:24:49] [Rank 0] step:9821/10000 train_time:404928ms step_avg:41.23ms +[2025-09-05 20:24:50] [Rank 0] step:9841/10000 train_time:405667ms step_avg:41.22ms +[2025-09-05 20:24:50] [Rank 0] step:9841/10000 train_time:405667ms step_avg:41.22ms +[2025-09-05 20:24:50] [Rank 0] step:9861/10000 train_time:406406ms step_avg:41.21ms +[2025-09-05 20:24:50] [Rank 0] step:9861/10000 train_time:406406ms step_avg:41.21ms +[2025-09-05 20:24:51] [Rank 0] step:9881/10000 train_time:407145ms step_avg:41.20ms +[2025-09-05 20:24:51] [Rank 0] step:9881/10000 train_time:407145ms step_avg:41.20ms +[2025-09-05 20:24:52] [Rank 0] step:9901/10000 train_time:407884ms step_avg:41.20ms +[2025-09-05 20:24:52] [Rank 0] step:9901/10000 train_time:407884ms step_avg:41.20ms +[2025-09-05 20:24:53] [Rank 0] step:9921/10000 train_time:408622ms step_avg:41.19ms +[2025-09-05 20:24:53] [Rank 0] step:9921/10000 train_time:408622ms step_avg:41.19ms +[2025-09-05 20:24:54] [Rank 0] step:9941/10000 train_time:409478ms step_avg:41.19ms +[2025-09-05 20:24:54] [Rank 0] step:9941/10000 train_time:409478ms step_avg:41.19ms +[2025-09-05 20:24:54] [Rank 0] step:9961/10000 train_time:410276ms step_avg:41.19ms +[2025-09-05 20:24:54] [Rank 0] step:9961/10000 train_time:410276ms step_avg:41.19ms +[2025-09-05 20:24:55] [Rank 0] step:9981/10000 train_time:411015ms step_avg:41.18ms +[2025-09-05 20:24:55] [Rank 0] step:9981/10000 train_time:411015ms step_avg:41.18ms +[2025-09-05 20:24:56] [Rank 0] step:10000/10000 train_time:411717ms step_avg:41.17ms +[2025-09-05 20:24:56] [Rank 0] step:10000/10000 train_time:411717ms step_avg:41.17ms +[2025-09-05 20:24:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 20:24:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 20:24:56] [Rank 0] PRINT: step:10000/10000 train_loss:1.6499 val_loss:1.6393 train_time:411843ms step_avg:41.18ms +[2025-09-05 20:24:56] [Rank 0] PRINT: step:10000/10000 train_loss:1.6499 val_loss:1.6393 train_time:411843ms step_avg:41.18ms +[2025-09-05 20:24:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:24:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 20:24:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:24:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 20:26:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:26:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 20:26:18] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:26:18] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 20:26:18] [Rank 0] Total Loss: 4.2387 +[2025-09-05 20:26:18] [Rank 0] Total Loss: 4.2387 +[2025-09-05 20:26:18] [Rank 0] Total FTA (Unweighted): 0.5006 +[2025-09-05 20:26:18] [Rank 0] Total FTA (Unweighted): 0.5006 +[2025-09-05 20:26:18] [Rank 0] Total FTA (Weighted): 0.5006 +[2025-09-05 20:26:18] [Rank 0] Total FTA (Weighted): 0.5006 +[2025-09-05 20:26:18] [Rank 0] Group 0 Loss: 3.4458 +[2025-09-05 20:26:18] [Rank 0] Group 0 Loss: 3.4458 +[2025-09-05 20:26:18] [Rank 0] Group 1 Loss: 3.1924 +[2025-09-05 20:26:18] [Rank 0] Group 1 Loss: 3.1924 +[2025-09-05 20:26:18] [Rank 0] Group 2 Loss: 3.0918 +[2025-09-05 20:26:18] [Rank 0] Group 2 Loss: 3.0918 +[2025-09-05 20:26:18] [Rank 0] Group 3 Loss: 3.5627 +[2025-09-05 20:26:18] [Rank 0] Group 3 Loss: 3.5627 +[2025-09-05 20:26:18] [Rank 0] Group 4 Loss: 3.6681 +[2025-09-05 20:26:18] [Rank 0] Group 4 Loss: 3.6681 +[2025-09-05 20:26:18] [Rank 0] Group 5 Loss: 3.9669 +[2025-09-05 20:26:18] [Rank 0] Group 5 Loss: 3.9669 +[2025-09-05 20:26:18] [Rank 0] Group 6 Loss: 4.1369 +[2025-09-05 20:26:18] [Rank 0] Group 6 Loss: 4.1369 +[2025-09-05 20:26:18] [Rank 0] Group 7 Loss: 4.3458 +[2025-09-05 20:26:18] [Rank 0] Group 7 Loss: 4.3458 +[2025-09-05 20:26:18] [Rank 0] Group 8 Loss: 4.5908 +[2025-09-05 20:26:18] [Rank 0] Group 8 Loss: 4.5908 +[2025-09-05 20:26:18] [Rank 0] Group 9 Loss: 4.7144 +[2025-09-05 20:26:18] [Rank 0] Group 9 Loss: 4.7144 +[2025-09-05 20:26:18] [Rank 0] Group 10 Loss: 4.8823 +[2025-09-05 20:26:18] [Rank 0] Group 10 Loss: 4.8823 +[2025-09-05 20:26:18] [Rank 0] Group 11 Loss: 4.8840 +[2025-09-05 20:26:18] [Rank 0] Group 11 Loss: 4.8840 +[2025-09-05 20:26:18] [Rank 0] Group 12 Loss: 4.8288 +[2025-09-05 20:26:18] [Rank 0] Group 12 Loss: 4.8288 +[2025-09-05 20:26:18] [Rank 0] Group 13 Loss: 4.8552 +[2025-09-05 20:26:18] [Rank 0] Group 13 Loss: 4.8552 +[2025-09-05 20:26:18] [Rank 0] Group 14 Loss: 4.8418 +[2025-09-05 20:26:18] [Rank 0] Group 14 Loss: 4.8418 +[2025-09-05 20:26:18] [Rank 0] Group 15 Loss: 4.8120 +[2025-09-05 20:26:18] [Rank 0] Group 15 Loss: 4.8120 +[2025-09-05 20:26:18] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:26:18] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 20:26:18] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:26:18] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 20:26:18] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:26:18] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 20:26:18] [Rank 0] Group 3 FTA: 0.8700 +[2025-09-05 20:26:18] [Rank 0] Group 3 FTA: 0.8700 +[2025-09-05 20:26:18] [Rank 0] Group 4 FTA: 0.5000 
+[2025-09-05 20:26:18] [Rank 0] Group 4 FTA: 0.5000 +[2025-09-05 20:26:18] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 20:26:18] [Rank 0] Group 5 FTA: 0.5100 +[2025-09-05 20:26:18] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 20:26:18] [Rank 0] Group 6 FTA: 0.3900 +[2025-09-05 20:26:18] [Rank 0] Group 7 FTA: 0.3600 +[2025-09-05 20:26:18] [Rank 0] Group 7 FTA: 0.3600 +[2025-09-05 20:26:18] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 20:26:18] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 20:26:18] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 20:26:18] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 20:26:18] [Rank 0] Group 10 FTA: 0.3700 +[2025-09-05 20:26:18] [Rank 0] Group 10 FTA: 0.3700 +[2025-09-05 20:26:18] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 20:26:18] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 20:26:18] [Rank 0] Group 12 FTA: 0.3100 +[2025-09-05 20:26:18] [Rank 0] Group 12 FTA: 0.3100 +[2025-09-05 20:26:18] [Rank 0] Group 13 FTA: 0.3100 +[2025-09-05 20:26:18] [Rank 0] Group 13 FTA: 0.3100 +[2025-09-05 20:26:18] [Rank 0] Group 14 FTA: 0.2700 +[2025-09-05 20:26:18] [Rank 0] Group 14 FTA: 0.2700 +[2025-09-05 20:26:18] [Rank 0] Group 15 FTA: 0.1700 +[2025-09-05 20:26:18] [Rank 0] Group 15 FTA: 0.1700 +[2025-09-05 20:26:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:26:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_loss_curves.png +[2025-09-05 20:26:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:26:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/per_class_acc_curves.png +[2025-09-05 20:26:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:26:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_loss_curve.png +[2025-09-05 20:26:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:26:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.2_seed_46/total_acc_curve.png +[2025-09-05 20:26:19] [Rank 0] step:10001/10000 train_time:411853ms step_avg:41.18ms +[2025-09-05 20:26:19] [Rank 0] step:10001/10000 train_time:411853ms step_avg:41.18ms +[2025-09-05 20:26:19] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 20:26:19 2025 --- +[2025-09-05 20:26:19] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 20:26:19 2025 --- +[2025-09-05 20:26:19] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB +[2025-09-05 20:26:19] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a721e10a920b6e7c49b09f9f5882542115ee0e50 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/config.json @@ -0,0 +1,29 @@ +{ + "cli_args": { + "unet": false, + "seed": 
42, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.5, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "59350442-1cb4-40b1-9241-deef1051b055", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..2ce06e463cfa31c3891dfe8aa6d3f273b2a7f252 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a38fea8f24b61c45d833eeaf785933d3b73581bd310287a730fa169bae425853 +size 400555 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..0694a2d70b698e99642e6bfd0d33142d9a488c7c --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:478da077bbd53475b1525f4f40c328c3fa2a2f90392441c230cd351b5234adc9 +size 486614 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..d9856fb956be8af19b77a78c9326a8fe8c66f51c --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:368ba48fb128a2c885e4cc31b2bd1a5b78c133acf9cf8ba9f6842bfa26541427 +size 93580 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..4ed46aa25cf316e6db8f60b3e1c9672855f09e4f --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a5eae0eedf87494d5bcce90388a940c3ab26f375c60d298457cef42fcc82851 +size 117633 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/training_log_59350442-1cb4-40b1-9241-deef1051b055.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/training_log_59350442-1cb4-40b1-9241-deef1051b055.txt new file mode 100644 index 0000000000000000000000000000000000000000..59fdd4733a05c9f508ca3ba7c391408092dd2f42 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/training_log_59350442-1cb4-40b1-9241-deef1051b055.txt @@ -0,0 +1,5614 @@ +[2025-09-05 14:18:49] [Rank 0] PRINT: --- Script Start: Fri Sep 5 14:18:49 2025 --- +[2025-09-05 14:18:49] [Rank 0] PRINT: --- Script Start: Fri Sep 5 14:18:49 2025 --- +[2025-09-05 14:18:49] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=42, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 14:18:49] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 14:18:49] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 14:18:49] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 14:18:49] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-05 14:18:49] [Rank 0] PRINT: Using fixed seed: 42 +[2025-09-05 14:18:49] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42 +[2025-09-05 14:18:49] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42 +[2025-09-05 14:18:49] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = 
torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
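+                    "9: SGD+Momentum for ALL parameters (uses --sgd_lr; no Muon/Adam); "
+                    "10: Muon(O Attn, MLP)/Adam(QK+V Attn); "
+                    "13: Muon(W_O Attn, W_2 MLP)/Adam(QK+V Attn, W_1 MLP); "
+                    "14: Muon(W_O Attn)/Adam(rest); 15: Muon(W_V Attn)/Adam(rest); "
+                    "16: Muon(QKV Attn)/Adam(W_O Attn, MLP)."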
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            # Write each message to the log file exactly once; writing this block
+            # twice doubles every line in the training log.
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... 
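+# (Illustrative launch sketch, not from this repo's docs: the RANK / LOCAL_RANK /
+# WORLD_SIZE variables read above are exactly the ones torchrun exports, so a
+# single-node run of this script could look like
+#   torchrun --standalone --nproc_per_node=8 <this_script>.py \
+#       --optimizer_mode 9 --model_parameterization gated --sgd_lr 0.5 --seed 42
+# where <this_script>.py is a placeholder for this training script's filename.)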
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
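+    # Worked example of generate_powerlaw_selection_counts (defined above) for a
+    # small exponent, m=3 (these runs use m_val=15; shown small for clarity):
+    #   group 0: 1 class   x 2**3 = 8 samples/class
+    #   group 1: 1 class   x 2**2 = 4 samples/class
+    #   group 2: 2 classes x 2**1 = 2 samples/class
+    #   group 3: 4 classes x 2**0 = 1 sample/class
+    # => selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+    #    class_groups     = [0, 1, 2, 2, 3, 3, 3, 3]
+    # so class_to_group_map sends each class_id to its power-law group.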
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
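+    # How FTA (first-token accuracy) is scored above, on a toy item (illustrative
+    # strings only, not from the dataset):
+    #   text   = "Where was Alice born? Answer: Paris"
+    #   prompt = "Where was Alice born?", answer = "Paris"
+    #   expected_token = tokenizer.encode(" Paris")[0]
+    # The logits row at index len(prompt_tokens) - 1 predicts the token right after
+    # the prompt, so argmax there == expected_token counts as correct.
+    #
+    # Weighted vs. unweighted totals (computed below): with two groups at
+    # acc 1.0 (150 samples) and acc 0.5 (50 samples),
+    #   weighted   = (150*1.0 + 50*0.5) / 200 = 0.875
+    #   unweighted = (1.0 + 0.5) / 2          = 0.75
+    # They coincide only when every group contributes equally many samples.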
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O Attn only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V Attn only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
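+    # A minimal sanity-check sketch (not part of the original run): no matrix
+    # should be claimed by both optimizers. _muon_ids / _adam_ids are
+    # illustrative local names introduced here.
+    if current_optimizer_mode != 9:
+        _muon_ids = {id(p) for p in flat_unique_muon_params} if optimizer2 is not None else set()
+        _adam_ids = {id(p) for g in adam_param_groups_config for p in g["params"]}
+        assert not (_muon_ids & _adam_ids), "a parameter is assigned to both Adam and Muon"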
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O Attn, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O Attn only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V Attn only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        muon_lr = exp_args.muon_lr # define muon_lr here; the gated branch otherwise references it without ever assigning it
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
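+    # get_lr (defined above) in numbers, for num_iterations=10000, cooldown_frac=0.8:
+    #   step <= 2000  (x <= 0.2): multiplier 1.0 (constant phase)
+    #   step  = 6000  (x = 0.6):  w = (1-0.6)/0.8 = 0.5 -> 0.5*1.0 + 0.5*0.1 = 0.55
+    #   step  = 10000 (x = 1.0):  w = 0 -> multiplier 0.1
+    # i.e. flat for the first 20% of training, then linear decay down to 10%.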
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
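+        # Shape of the saved index file (this is what fixed_eval_indices.json in
+        # the run directory contains): str(group_id) -> up to PER_GROUP_K line
+        # numbers into the QA jsonl, e.g. {"0": [<line numbers>], "1": [...], ...}.
+        # Sampling is seeded (seed=2025 above), so every evaluation reuses the
+        # exact same per-group samples and the accuracy/loss curves stay jitter-free.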
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
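+        # Size check for this config (a sketch assuming a single-GPU run, world_size=1):
+        #   val_batch_size = 1 * 16384 = 16384 tokens
+        #   val_num_steps  = 491520 // 16384 = 30, with no remainder, so the
+        #   divisibility warning above stays silent; with 8 GPUs the batch is
+        #   131072 tokens and only 3 full steps fit (491520 / 131072 = 3.75).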
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
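+            # To restore one of these checkpoints later (offline sketch; the file
+            # name is illustrative, and the keys follow log_checkpoint above --
+            # step/code/model/optimizers):
+            #   ckpt = torch.load("ckpt_epoch_500.pt", map_location="cuda")
+            #   model_compiled.load_state_dict(ckpt["model"])
+            #   for opt, state in zip(optimizers, ckpt["optimizers"]):
+            #       opt.load_state_dict(state)
+            #   start_step = ckpt["step"] + 1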
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 14:18:49] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
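+# ----------------------------------------------------------------------------
+# Illustrative sketch (not part of the original script): a minimal writer that
+# matches the header checks in _load_data_shard above -- 256 int32 header words
+# (magic=20240520, version=1, num_tokens), then the token ids as uint16.
+# The helper name and example path are assumptions for illustration only.
+def write_data_shard(path, tokens):
+    import numpy as np
+    header = np.zeros(256, dtype=np.int32)
+    header[0], header[1], header[2] = 20240520, 1, len(tokens)
+    with open(path, "wb") as f:
+        f.write(header.tobytes())                               # 256 * 4 bytes
+        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())  # 2 * num_tokens bytes
+# e.g. write_data_shard("train_000000.bin", [50256, 11, 2061])
+# ----------------------------------------------------------------------------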
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n")
+ + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3.
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
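+# ----------------------------------------------------------------------------
+# Worked example (added for clarity) of the padding/target scheme in the loop
+# above, on an illustrative 5-token sequence [a, b, c, d, eos] with
+# BLOCK_SIZE=128, so padded_len=128:
+#   input_seq  = [a, b, c, d, eos, pad, pad, ..., pad]       # length 128
+#   target_seq = [b, c, d, eos, pad, -100, -100, ..., -100]  # length 128
+# cross_entropy(..., ignore_index=-100) then scores the real next-token
+# positions (plus the one pad right after eos, a quirk of tokens[1:] + [pad]),
+# and window_blocks = padded_len // BLOCK_SIZE = 1 attention block.
+# ----------------------------------------------------------------------------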
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
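+# ----------------------------------------------------------------------------
+# Numeric example (added for clarity) of the two total-accuracy aggregations
+# above. Suppose group 0 has 100 eval samples with 90 correct and group 5 has
+# 10 samples with 2 correct:
+#   total_acc_weighted   = (90 + 2) / (100 + 10) = 0.836  (large groups dominate)
+#   total_acc_unweighted = (0.90 + 0.20) / 2     = 0.550  (groups count equally)
+# The per-step history in the training loop records the unweighted variant
+# under 'total_acc'.
+# ----------------------------------------------------------------------------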
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
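+# ----------------------------------------------------------------------------
+# Condensed form of the stratified sampling above (illustrative helper, not in
+# the original script): keep each class's share of the eval subset roughly
+# equal to its share of the full file, with at least one sample per class.
+def stratified_subset(data_by_class, num_samples, total, rng):
+    out = []
+    for items in data_by_class.values():
+        k = max(1, int(len(items) * num_samples / total))
+        out.extend(rng.sample(items, min(len(items), k)))
+    return out
+# e.g. stratified_subset(data_by_class, 5000, len(qa_data), random.Random(0))
+# ----------------------------------------------------------------------------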
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
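+# ----------------------------------------------------------------------------
+# Summary table (added for clarity) of the mode dispatch in this branch.
+# Embeds, lm_head and scalar parameters always go to Adam; any matrix not
+# listed for Muon in a given mode also goes to Adam:
+#   mode 0: Muon(QKVO + MLP)       mode 6:  Muon(W_2)
+#   mode 1: Muon(QK)               mode 7:  Muon(VO + MLP)
+#   mode 2: Muon(VO)               mode 8:  Muon(VO + W_2)
+#   mode 3: Muon(QKVO)             mode 9:  plain SGD+momentum on all params
+#   mode 4: Muon(MLP)              mode 10: Muon(W_O + MLP)
+#   mode 5: all matrices to Adam   mode 13: Muon(W_O + W_2)
+#   mode 14: Muon(W_O)    mode 15: Muon(W_V)    mode 16: Muon(QKV)
+# ----------------------------------------------------------------------------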
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
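+# ----------------------------------------------------------------------------
+# Two training-loop details that interact with this dispatch (see the loop at
+# the end of the script): mode 9 additionally clips gradients to max_norm=1.0,
+# and Muon's momentum is warmed up over the first 300 steps. Illustrative form
+# of that warmup (helper name is not from the original script):
+def muon_momentum(step, warmup_steps=300, start=0.85, end=0.95):
+    frac = min(step / warmup_steps, 1.0)
+    return (1 - frac) * start + frac * end
+# muon_momentum(0) -> 0.85, muon_momentum(150) -> 0.90, muon_momentum(300) -> 0.95
+# ----------------------------------------------------------------------------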
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
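+# ----------------------------------------------------------------------------
+# The id()-based dedup above keeps a tensor from being registered twice with
+# Muon. A complementary sanity check (illustrative helper, not in the original
+# script) is that the optimizers partition the model's parameters exactly:
+def check_param_partition(model, optimizers):
+    seen = set()
+    for opt in optimizers:
+        for group in opt.param_groups:
+            for p in group["params"]:
+                assert id(p) not in seen, "parameter assigned to two optimizers"
+                seen.add(id(p))
+    assert seen == {id(p) for p in model.parameters()}, "parameters left unoptimized"
+# ----------------------------------------------------------------------------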
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
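+# ----------------------------------------------------------------------------
+# Note (added for clarity): in this gated branch "W_1" covers two input
+# projections (c_fc and c_up) and "W_2" is c_proj. Assuming a standard
+# gated-MLP forward in models.nano_GPT_gated (an assumption, not verified from
+# this log):
+#   h = act(x @ c_fc.weight.T) * (x @ c_up.weight.T)
+#   y = h @ c_proj.weight.T
+# which is why mlp_w1_group above combines mlp_fc_params and mlp_up_params,
+# while mlp_w2_group is mlp_proj_params alone.
+# ----------------------------------------------------------------------------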
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
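+# ----------------------------------------------------------------------------
+# Worked values (added for clarity) of the two schedules defined above, with
+# num_iterations=10000 and cooldown_frac=0.8:
+#   get_lr: 1.0 for steps 0..2000, then linear decay to 0.1, e.g.
+#     get_lr(2000) = 1.0, get_lr(6000) = 0.55, get_lr(10000) = 0.1
+#   window: the next multiple of 128 at or above 1728*x tokens (at least 128),
+#     i.e. 128 tokens at step 0, growing to 1792 tokens (14 blocks) by step 10000.
+# ----------------------------------------------------------------------------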
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
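+# ----------------------------------------------------------------------------
+# Note (added for clarity): the fixed-eval index file built above is a JSON
+# object mapping group id (as a string) to at most per_group_k line offsets
+# into the QA jsonl, e.g. {"0": [17, 42, ...], "1": [...], ...}. Because the
+# sampler is seeded (seed=2025) and the file is reused when it already exists,
+# every evaluation step scores the same samples, which removes sampling jitter
+# from the per-group loss and FTA curves.
+# ----------------------------------------------------------------------------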
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
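+# ----------------------------------------------------------------------------
+# Note (added for clarity) on the timing bookkeeping in this section: the wall
+# clock t0 is stopped at the top of the validation block and restarted after
+# it, so training_time_ms and the printed step_avg measure training time only,
+# excluding validation, detailed evaluation and plotting; the
+# torch.cuda.synchronize() calls on both sides are needed because CUDA kernel
+# launches are asynchronous. With the defaults here (assuming world_size=1),
+# val_batch_size = 16384 and 491520 // 16384 = 30 validation steps with no
+# remainder, so the divisibility warning above stays silent.
+# ----------------------------------------------------------------------------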
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                # num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
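+        # NB: despite the "epoch" in the ckpt_epoch_{step}.pt filename, checkpoints
+        # are keyed by optimizer step, and each one bundles the step, the logged
+        # script source (code), the model state dict, and every optimizer's state
+        # dict, so a run could in principle be resumed mid-schedule.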
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Momentum warmup: ramps linearly from 0.85 at step 0 to 0.95 by step 300
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()  # NB: computed but not used in the log line below
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
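+# For reference, a minimal sketch of the print0 helper assumed throughout this
+# script (hypothetical reconstruction -- the actual definition appears earlier in
+# the file; run_log_path and master_process are assumed globals). Rank 0 prepends
+# a timestamp and rank tag, appends the line to the run log, and echoes it to
+# stdout when console=True:
+#
+#     import time
+#
+#     def print0(s: str, console: bool = False):
+#         if master_process:
+#             stamped = f"[{time.strftime('%Y-%m-%d %H:%M:%S')}] [Rank 0] {s}"
+#             with open(run_log_path, "a") as f:
+#                 print(stamped, file=f)
+#             if console:
+#                 print(stamped)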
+[2025-09-05 14:18:49] [Rank 0] PRINT: Constructing model...
+[2025-09-05 14:18:51] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 14:18:51] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 14:18:51] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 14:18:55] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 14:18:55] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 14:18:55] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 14:18:55] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 14:18:55] [Rank 0] PRINT: Model returns:
+[2025-09-05 14:18:55] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 14:18:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 14:18:55] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 14:18:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 14:18:55] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 14:18:59] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 14:18:59] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 14:22:27] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 14:22:27] [Rank 0] PRINT: Starting training...
+[2025-09-05 14:22:35] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/fixed_eval_indices.json
+[2025-09-05 14:22:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:25:11] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 14:25:56] [Rank 0] step:21/10000 train_time:45018ms step_avg:2143.72ms
+[2025-09-05 14:25:57] [Rank 0] step:41/10000 train_time:45749ms step_avg:1115.84ms
+[2025-09-05 14:25:57] [Rank 0] step:61/10000 train_time:46481ms step_avg:761.98ms
+[2025-09-05 14:25:58] [Rank 0] step:81/10000 train_time:47211ms step_avg:582.85ms
+[2025-09-05 14:25:59] [Rank 0] step:101/10000 train_time:47942ms step_avg:474.68ms
+[2025-09-05 14:25:59] [Rank 0] step:121/10000 train_time:48673ms step_avg:402.26ms
+[2025-09-05 14:26:00] [Rank 0] step:141/10000 train_time:49404ms step_avg:350.38ms
+[2025-09-05 14:26:01] [Rank 0] step:161/10000 train_time:50135ms step_avg:311.40ms
+[2025-09-05 14:26:02] [Rank 0] step:181/10000 train_time:50866ms step_avg:281.03ms
+[2025-09-05 14:26:02] [Rank 0] step:201/10000 train_time:51597ms step_avg:256.70ms
+[2025-09-05 14:26:03] [Rank 0] step:221/10000 train_time:52328ms step_avg:236.78ms
+[2025-09-05 14:26:04] [Rank 0] step:241/10000 train_time:53059ms step_avg:220.16ms
+[2025-09-05 14:26:05] [Rank 0] step:261/10000 train_time:53791ms step_avg:206.10ms
+[2025-09-05 14:26:05] [Rank 0] step:281/10000 train_time:54522ms step_avg:194.03ms
+[2025-09-05 14:26:06] [Rank 0] step:301/10000 train_time:55254ms step_avg:183.57ms
+[2025-09-05 14:26:07] [Rank 0] step:321/10000 train_time:55985ms step_avg:174.41ms
+[2025-09-05 14:26:08] [Rank 0] step:341/10000 train_time:56717ms step_avg:166.32ms
+[2025-09-05 14:26:08] [Rank 0] step:361/10000 train_time:57463ms step_avg:159.18ms
+[2025-09-05 14:26:09] [Rank 0] step:381/10000 train_time:58194ms step_avg:152.74ms
+[2025-09-05 14:26:10] [Rank 0] step:401/10000 train_time:58925ms step_avg:146.94ms
+[2025-09-05 14:26:10] [Rank 0] step:421/10000 train_time:59656ms step_avg:141.70ms
+[2025-09-05 14:26:11] [Rank 0] step:441/10000 train_time:60388ms step_avg:136.93ms
+[2025-09-05 14:26:12] [Rank 0] step:461/10000 train_time:61121ms step_avg:132.58ms
+[2025-09-05 14:26:13] [Rank 0] step:481/10000 train_time:61852ms step_avg:128.59ms
+[2025-09-05 14:26:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:26:14] [Rank 0] PRINT: step:500/10000 train_loss:3.6375 val_loss:2.4106 train_time:62663ms step_avg:125.33ms
+[2025-09-05 14:26:14] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:26:14] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:27:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:27:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:27:36] [Rank 0] Total Loss: 4.8765
+[2025-09-05 14:27:36] [Rank 0] Total FTA (Unweighted): 0.2394
+[2025-09-05 14:27:36] [Rank 0] Total FTA (Weighted): 0.2394
+[2025-09-05 14:27:36] [Rank 0] Group 0 Loss: 3.1187
+[2025-09-05 14:27:36] [Rank 0] Group 1 Loss: 3.1587
+[2025-09-05 14:27:36] [Rank 0] Group 2 Loss: 3.2338
+[2025-09-05 14:27:36] [Rank 0] Group 3 Loss: 3.7394
+[2025-09-05 14:27:36] [Rank 0] Group 4 Loss: 4.1812
+[2025-09-05 14:27:36] [Rank 0] Group 5 Loss: 4.6877
+[2025-09-05 14:27:36] [Rank 0] Group 6 Loss: 5.0323
+[2025-09-05 14:27:36] [Rank 0] Group 7 Loss: 5.2067
+[2025-09-05 14:27:36] [Rank 0] Group 8 Loss: 5.5332
+[2025-09-05 14:27:36] [Rank 0] Group 9 Loss: 5.6712
+[2025-09-05 14:27:36] [Rank 0] Group 10 Loss: 5.7619
+[2025-09-05 14:27:36] [Rank 0] Group 11 Loss: 5.8274
+[2025-09-05 14:27:36] [Rank 0] Group 12 Loss: 5.6897
+[2025-09-05 14:27:36] [Rank 0] Group 13 Loss: 5.7322
+[2025-09-05 14:27:36] [Rank 0] Group 14 Loss: 5.7696
+[2025-09-05 14:27:36] [Rank 0] Group 15 Loss: 5.6799
+[2025-09-05 14:27:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:27:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:27:36] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 14:27:36] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 14:27:36] [Rank 0] Group 4 FTA: 0.1900
+[2025-09-05 14:27:36] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-05 14:27:36] [Rank 0] Group 6 FTA: 0.0800
+[2025-09-05 14:27:36] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-05 14:27:36] [Rank 0] Group 8 FTA: 0.1800
+[2025-09-05 14:27:36] [Rank 0] Group 9 FTA: 0.1200
+[2025-09-05 14:27:36] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 14:27:36] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 14:27:36] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 14:27:36] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 14:27:36] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 14:27:36] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 14:27:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:27:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:27:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:27:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
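+# The FTA ("first-token accuracy") figures above are produced by
+# run_detailed_evaluation on the fixed eval set (16 groups x 100 samples = the
+# "1600 samples" logged above). A minimal sketch of how a first-token hit could
+# be scored, assuming greedy decoding of a single token per QA prompt
+# (hypothetical reconstruction -- prompt_ids, window_blocks, and gold_answer_ids
+# are placeholder names, not the script's actual implementation):
+#
+#     with torch.no_grad():
+#         logits = model(prompt_ids, None, window_blocks)  # (1, T, vocab_size)
+#         pred_tok = int(logits[0, -1].argmax())
+#     hit = (pred_tok == gold_answer_ids[0])
+#     # group FTA = mean(hit) over that group's fixed samples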
+[2025-09-05 14:27:37] [Rank 0] step:501/10000 train_time:62673ms step_avg:125.10ms
+[2025-09-05 14:27:38] [Rank 0] step:521/10000 train_time:63365ms step_avg:121.62ms
+[2025-09-05 14:27:39] [Rank 0] step:541/10000 train_time:64097ms step_avg:118.48ms
+[2025-09-05 14:27:39] [Rank 0] step:561/10000 train_time:64827ms step_avg:115.56ms
+[2025-09-05 14:27:40] [Rank 0] step:581/10000 train_time:65559ms step_avg:112.84ms
+[2025-09-05 14:27:41] [Rank 0] step:601/10000 train_time:66290ms step_avg:110.30ms
+[2025-09-05 14:27:42] [Rank 0] step:621/10000 train_time:67021ms step_avg:107.92ms
+[2025-09-05 14:27:42] [Rank 0] step:641/10000 train_time:67752ms step_avg:105.70ms
+[2025-09-05 14:27:43] [Rank 0] step:661/10000 train_time:68483ms step_avg:103.60ms
+[2025-09-05 14:27:44] [Rank 0] step:681/10000 train_time:69214ms step_avg:101.64ms
+[2025-09-05 14:27:45] [Rank 0] step:701/10000 train_time:69944ms step_avg:99.78ms
+[2025-09-05 14:27:45] [Rank 0] step:721/10000 train_time:70675ms step_avg:98.02ms
+[2025-09-05 14:27:46] [Rank 0] step:741/10000 train_time:71406ms step_avg:96.36ms
+[2025-09-05 14:27:47] [Rank 0] step:761/10000 train_time:72141ms step_avg:94.80ms
+[2025-09-05 14:27:47] [Rank 0] step:781/10000 train_time:72880ms step_avg:93.32ms
+[2025-09-05 14:27:48] [Rank 0] step:801/10000 train_time:73616ms step_avg:91.91ms
+[2025-09-05 14:27:50] [Rank 0] step:821/10000 train_time:74981ms step_avg:91.33ms
+[2025-09-05 14:27:50] [Rank 0] step:841/10000 train_time:75716ms step_avg:90.03ms
+[2025-09-05 14:27:51] [Rank 0] step:861/10000 train_time:76452ms step_avg:88.79ms
+[2025-09-05 14:27:52] [Rank 0] step:881/10000 train_time:77188ms step_avg:87.61ms
+[2025-09-05 14:27:52] [Rank 0] step:901/10000 train_time:77924ms step_avg:86.49ms
+[2025-09-05 14:27:53] [Rank 0] step:921/10000 train_time:78659ms step_avg:85.41ms
+[2025-09-05 14:27:54] [Rank 0] step:941/10000 train_time:79395ms step_avg:84.37ms
+[2025-09-05 14:27:55] [Rank 0] step:961/10000 train_time:80131ms step_avg:83.38ms
+[2025-09-05 14:27:55] [Rank 0] step:981/10000 train_time:80867ms step_avg:82.43ms
+[2025-09-05 14:27:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:27:57] [Rank 0] PRINT: step:1000/10000 train_loss:2.0922 val_loss:1.8571 train_time:81684ms step_avg:81.68ms
+[2025-09-05 14:27:57] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:27:57] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:29:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:29:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:29:19] [Rank 0] Total Loss: 4.5731
+[2025-09-05 14:29:19] [Rank 0] Total FTA (Unweighted): 0.3319
+[2025-09-05 14:29:19] [Rank 0] Total FTA (Weighted): 0.3319
+[2025-09-05 14:29:19] [Rank 0] Group 0 Loss: 3.5040
+[2025-09-05 14:29:19] [Rank 0] Group 1 Loss: 3.2605
+[2025-09-05 14:29:19] [Rank 0] Group 2 Loss: 3.2531
+[2025-09-05 14:29:19] [Rank 0] Group 3 Loss: 3.6020
+[2025-09-05 14:29:19] [Rank 0] Group 4 Loss: 3.9589
+[2025-09-05 14:29:19] [Rank 0] Group 5 Loss: 4.2063
+[2025-09-05 14:29:19] [Rank 0] Group 6 Loss: 4.5191
+[2025-09-05 14:29:19] [Rank 0] Group 7 Loss: 4.7208
+[2025-09-05 14:29:19] [Rank 0] Group 8 Loss: 5.0088
+[2025-09-05 14:29:19] [Rank 0] Group 9 Loss: 5.1719
+[2025-09-05 14:29:19] [Rank 0] Group 10 Loss: 5.3019
+[2025-09-05 14:29:19] [Rank 0] Group 11 Loss: 5.3547
+[2025-09-05 14:29:19] [Rank 0] Group 12 Loss: 5.2750
+[2025-09-05 14:29:19] [Rank 0] Group 13 Loss: 5.3716
+[2025-09-05 14:29:19] [Rank 0] Group 14 Loss: 5.3390
+[2025-09-05 14:29:19] [Rank 0] Group 15 Loss: 5.3224
+[2025-09-05 14:29:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:29:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:29:19] [Rank 0] Group 2 FTA: 0.8800
+[2025-09-05 14:29:19] [Rank 0] Group 3 FTA: 0.4300
+[2025-09-05 14:29:19] [Rank 0] Group 4 FTA: 0.3100
+[2025-09-05 14:29:19] [Rank 0] Group 5 FTA: 0.2800
+[2025-09-05 14:29:19] [Rank 0] Group 6 FTA: 0.2600
+[2025-09-05 14:29:19] [Rank 0] Group 7 FTA: 0.1200
+[2025-09-05 14:29:19] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 14:29:19] [Rank 0] Group 9 FTA: 0.1400
+[2025-09-05 14:29:19] [Rank 0] Group 10 FTA: 0.1400
+[2025-09-05 14:29:19] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 14:29:19] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 14:29:19] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 14:29:19] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 14:29:19] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 14:29:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:29:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:29:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:29:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
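+# A rough sketch of what the plot_curves helper invoked above might look like
+# (hypothetical -- the real definition is earlier in the script; history shapes
+# are inferred from the calls, i.e. either {step: value} or {group_id: {step: value}}):
+#
+#     import matplotlib
+#     matplotlib.use("Agg")  # headless rendering on the training node
+#     import matplotlib.pyplot as plt
+#
+#     def plot_curves(history, out_path, title, ylabel, y_lim=None):
+#         fig, ax = plt.subplots()
+#         if history and isinstance(next(iter(history.values())), dict):
+#             for gid, series in sorted(history.items(), key=lambda kv: int(kv[0])):
+#                 steps = sorted(series, key=int)
+#                 ax.plot([int(s) for s in steps], [series[s] for s in steps], label=f"Group {gid}")
+#             ax.legend(fontsize=6)
+#         else:
+#             steps = sorted(history, key=int)
+#             ax.plot([int(s) for s in steps], [history[s] for s in steps])
+#         ax.set_title(title); ax.set_xlabel("step"); ax.set_ylabel(ylabel)
+#         if y_lim: ax.set_ylim(y_lim)
+#         fig.savefig(out_path, dpi=150); plt.close(fig)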
+[2025-09-05 14:29:20] [Rank 0] step:1001/10000 train_time:81693ms step_avg:81.61ms
+[2025-09-05 14:29:21] [Rank 0] step:1021/10000 train_time:82371ms step_avg:80.68ms
+[2025-09-05 14:29:22] [Rank 0] step:1041/10000 train_time:83106ms step_avg:79.83ms
+[2025-09-05 14:29:22] [Rank 0] step:1061/10000 train_time:83841ms step_avg:79.02ms
+[2025-09-05 14:29:23] [Rank 0] step:1081/10000 train_time:84576ms step_avg:78.24ms
+[2025-09-05 14:29:24] [Rank 0] step:1101/10000 train_time:85312ms step_avg:77.49ms
+[2025-09-05 14:29:25] [Rank 0] step:1121/10000 train_time:86048ms step_avg:76.76ms
+[2025-09-05 14:29:25] [Rank 0] step:1141/10000 train_time:86783ms step_avg:76.06ms
+[2025-09-05 14:29:26] [Rank 0] step:1161/10000 train_time:87518ms step_avg:75.38ms
+[2025-09-05 14:29:27] [Rank 0] step:1181/10000 train_time:88254ms step_avg:74.73ms
+[2025-09-05 14:29:28] [Rank 0] step:1201/10000 train_time:88991ms step_avg:74.10ms
+[2025-09-05 14:29:28] [Rank 0] step:1221/10000 train_time:89726ms step_avg:73.49ms
+[2025-09-05 14:29:29] [Rank 0] step:1241/10000 train_time:90462ms step_avg:72.89ms
+[2025-09-05 14:29:30] [Rank 0] step:1261/10000 train_time:91198ms step_avg:72.32ms
+[2025-09-05 14:29:30] [Rank 0] step:1281/10000 train_time:91933ms step_avg:71.77ms
+[2025-09-05 14:29:31] [Rank 0] step:1301/10000 train_time:92669ms step_avg:71.23ms
+[2025-09-05 14:29:32] [Rank 0] step:1321/10000 train_time:93405ms step_avg:70.71ms
+[2025-09-05 14:29:33] [Rank 0] step:1341/10000 train_time:94142ms step_avg:70.20ms
+[2025-09-05 14:29:33] [Rank 0] step:1361/10000 train_time:94877ms step_avg:69.71ms
+[2025-09-05 14:29:34] [Rank 0] step:1381/10000 train_time:95613ms step_avg:69.23ms
+[2025-09-05 14:29:35] [Rank 0] step:1401/10000 train_time:96349ms step_avg:68.77ms
+[2025-09-05 14:29:36] [Rank 0] step:1421/10000 train_time:97086ms step_avg:68.32ms
+[2025-09-05 14:29:36] [Rank 0] step:1441/10000 train_time:97824ms step_avg:67.89ms
+[2025-09-05 14:29:37] [Rank 0] step:1461/10000 train_time:98560ms step_avg:67.46ms
+[2025-09-05 14:29:38] [Rank 0] step:1481/10000 train_time:99296ms step_avg:67.05ms
+[2025-09-05 14:29:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:29:39] [Rank 0] PRINT: step:1500/10000 train_loss:1.7601 val_loss:1.6702 train_time:100113ms step_avg:66.74ms
+[2025-09-05 14:29:39] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:29:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:31:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:31:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:31:01] [Rank 0] Total Loss: 4.3509
+[2025-09-05 14:31:01] [Rank 0] Total FTA (Unweighted): 0.3931
+[2025-09-05 14:31:01] [Rank 0] Total FTA (Weighted): 0.3931
+[2025-09-05 14:31:01] [Rank 0] Group 0 Loss: 3.3818
+[2025-09-05 14:31:01] [Rank 0] Group 1 Loss: 3.3270
+[2025-09-05 14:31:01] [Rank 0] Group 2 Loss: 3.1584
+[2025-09-05 14:31:01] [Rank 0] Group 3 Loss: 3.5460
+[2025-09-05 14:31:01] [Rank 0] Group 4 Loss: 3.7372
+[2025-09-05 14:31:01] [Rank 0] Group 5 Loss: 3.9932
+[2025-09-05 14:31:01] [Rank 0] Group 6 Loss: 4.1738
+[2025-09-05 14:31:01] [Rank 0] Group 7 Loss: 4.4314
+[2025-09-05 14:31:01] [Rank 0] Group 8 Loss: 4.7445
+[2025-09-05 14:31:01] [Rank 0] Group 9 Loss: 4.8587
+[2025-09-05 14:31:01] [Rank 0] Group 10 Loss: 5.0167
+[2025-09-05 14:31:01] [Rank 0] Group 11 Loss: 5.0209
+[2025-09-05 14:31:01] [Rank 0] Group 12 Loss: 4.9950
+[2025-09-05 14:31:01] [Rank 0] Group 13 Loss: 5.0941
+[2025-09-05 14:31:01] [Rank 0] Group 14 Loss: 5.0710
+[2025-09-05 14:31:01] [Rank 0] Group 15 Loss: 5.0644
+[2025-09-05 14:31:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:31:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:31:01] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:31:01] [Rank 0] Group 3 FTA: 0.6200
+[2025-09-05 14:31:01] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 14:31:01] [Rank 0] Group 5 FTA: 0.4600
+[2025-09-05 14:31:01] [Rank 0] Group 6 FTA: 0.3500
+[2025-09-05 14:31:01] [Rank 0] Group 7 FTA: 0.2600
+[2025-09-05 14:31:01] [Rank 0] Group 8 FTA: 0.3100
+[2025-09-05 14:31:01] [Rank 0] Group 9 FTA: 0.2000
+[2025-09-05 14:31:01] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 14:31:01] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 14:31:01] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 14:31:01] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 14:31:01] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 14:31:01] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 14:31:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:31:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:31:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:31:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:31:02] [Rank 0] step:1501/10000 train_time:100122ms step_avg:66.70ms
+[2025-09-05 14:31:03] [Rank 0] step:1521/10000 train_time:100798ms step_avg:66.27ms
+[2025-09-05 14:31:04] [Rank 0] step:1541/10000 train_time:101534ms step_avg:65.89ms
+[2025-09-05 14:31:04] [Rank 0] step:1561/10000 train_time:102270ms step_avg:65.52ms
+[2025-09-05 14:31:05] [Rank 0] step:1581/10000 train_time:103005ms step_avg:65.15ms
+[2025-09-05 14:31:06] [Rank 0] step:1601/10000 train_time:103741ms step_avg:64.80ms
+[2025-09-05 14:31:07] [Rank 0] step:1621/10000 train_time:104477ms step_avg:64.45ms
+[2025-09-05 14:31:08] [Rank 0] step:1641/10000 train_time:105823ms step_avg:64.49ms
+[2025-09-05 14:31:09] [Rank 0] step:1661/10000 train_time:106558ms step_avg:64.15ms
+[2025-09-05 14:31:10] [Rank 0] step:1681/10000 train_time:107293ms step_avg:63.83ms
+[2025-09-05 14:31:10] [Rank 0] step:1701/10000 train_time:108029ms step_avg:63.51ms
+[2025-09-05 14:31:11] [Rank 0] step:1721/10000 train_time:108764ms step_avg:63.20ms
+[2025-09-05 14:31:12] [Rank 0] step:1741/10000 train_time:109500ms step_avg:62.90ms
+[2025-09-05 14:31:12] [Rank 0] step:1761/10000 train_time:110239ms step_avg:62.60ms
+[2025-09-05 14:31:13] [Rank 0] step:1781/10000 train_time:110975ms step_avg:62.31ms
+[2025-09-05 14:31:14] [Rank 0] step:1801/10000 train_time:111712ms step_avg:62.03ms
+[2025-09-05 14:31:15] [Rank 0] step:1821/10000 train_time:112448ms step_avg:61.75ms
+[2025-09-05 14:31:15] [Rank 0] step:1841/10000 train_time:113184ms step_avg:61.48ms
+[2025-09-05 14:31:16] [Rank 0] step:1861/10000 train_time:113920ms step_avg:61.21ms
+[2025-09-05 14:31:17] [Rank 0] step:1881/10000 train_time:114656ms step_avg:60.95ms
+[2025-09-05 14:31:18] [Rank 0] step:1901/10000 train_time:115391ms step_avg:60.70ms
+[2025-09-05 14:31:18] [Rank 0] step:1921/10000 train_time:116128ms step_avg:60.45ms
+[2025-09-05 14:31:19] [Rank 0] step:1941/10000 train_time:116864ms step_avg:60.21ms
+[2025-09-05 14:31:20] [Rank 0] step:1961/10000 train_time:117600ms step_avg:59.97ms
+[2025-09-05 14:31:21] [Rank 0] step:1981/10000 train_time:118336ms step_avg:59.74ms
+[2025-09-05 14:31:21] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:31:22] [Rank 0] PRINT: step:2000/10000 train_loss:1.6203 val_loss:1.5674 train_time:119153ms step_avg:59.58ms
+[2025-09-05 14:31:22] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:31:22] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:32:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:32:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:32:43] [Rank 0] Total Loss: 4.3508
+[2025-09-05 14:32:43] [Rank 0] Total FTA (Unweighted): 0.4313
+[2025-09-05 14:32:43] [Rank 0] Total FTA (Weighted): 0.4313
+[2025-09-05 14:32:43] [Rank 0] Group 0 Loss: 3.4655
+[2025-09-05 14:32:43] [Rank 0] Group 1 Loss: 3.3186
+[2025-09-05 14:32:43] [Rank 0] Group 2 Loss: 3.2318
+[2025-09-05 14:32:43] [Rank 0] Group 3 Loss: 3.5753
+[2025-09-05 14:32:43] [Rank 0] Group 4 Loss: 3.8651
+[2025-09-05 14:32:43] [Rank 0] Group 5 Loss: 3.9662
+[2025-09-05 14:32:43] [Rank 0] Group 6 Loss: 4.1492
+[2025-09-05 14:32:43] [Rank 0] Group 7 Loss: 4.3718
+[2025-09-05 14:32:43] [Rank 0] Group 8 Loss: 4.7133
+[2025-09-05 14:32:43] [Rank 0] Group 9 Loss: 4.8375
+[2025-09-05 14:32:43] [Rank 0] Group 10 Loss: 4.9517
+[2025-09-05 14:32:43] [Rank 0] Group 11 Loss: 4.9822
+[2025-09-05 14:32:43] [Rank 0] Group 12 Loss: 4.9703
+[2025-09-05 14:32:43] [Rank 0] Group 13 Loss: 5.0406
+[2025-09-05 14:32:43] [Rank 0] Group 14 Loss: 5.0833
+[2025-09-05 14:32:43] [Rank 0] Group 15 Loss: 5.0903
+[2025-09-05 14:32:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:32:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:32:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:32:43] [Rank 0] Group 3 FTA: 0.9000
+[2025-09-05 14:32:43] [Rank 0] Group 4 FTA: 0.4500
+[2025-09-05 14:32:43] [Rank 0] Group 5 FTA: 0.4900
+[2025-09-05 14:32:43] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 14:32:43] [Rank 0] Group 7 FTA: 0.3500
+[2025-09-05 14:32:43] [Rank 0] Group 8 FTA: 0.3300
+[2025-09-05 14:32:43] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 14:32:43] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 14:32:43] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 14:32:43] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 14:32:43] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 14:32:43] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 14:32:43] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 14:32:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:32:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:32:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:32:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:32:45] [Rank 0] step:2001/10000 train_time:119163ms step_avg:59.55ms
+[2025-09-05 14:32:45] [Rank 0] step:2021/10000 train_time:119830ms step_avg:59.29ms
+[2025-09-05 14:32:46] [Rank 0] step:2041/10000 train_time:120566ms step_avg:59.07ms
+[2025-09-05 14:32:47] [Rank 0] step:2061/10000 train_time:121302ms step_avg:58.86ms
+[2025-09-05 14:32:48] [Rank 0] step:2081/10000 train_time:122038ms step_avg:58.64ms
+[2025-09-05 14:32:48] [Rank 0] step:2101/10000 train_time:122774ms step_avg:58.44ms
+[2025-09-05 14:32:49] [Rank 0] step:2121/10000 train_time:123509ms step_avg:58.23ms
+[2025-09-05 14:32:50] [Rank 0] step:2141/10000 train_time:124245ms step_avg:58.03ms
+[2025-09-05 14:32:51] [Rank 0] step:2161/10000 train_time:125127ms step_avg:57.90ms
+[2025-09-05 14:32:52] [Rank 0] step:2181/10000 train_time:125905ms step_avg:57.73ms
+[2025-09-05 14:32:52] [Rank 0] step:2201/10000 train_time:126640ms step_avg:57.54ms
+[2025-09-05 14:32:53] [Rank 0] step:2221/10000 train_time:127377ms step_avg:57.35ms
+[2025-09-05 14:32:54] [Rank 0] step:2241/10000 train_time:128238ms step_avg:57.22ms
+[2025-09-05 14:32:55] [Rank 0] step:2261/10000 train_time:128981ms step_avg:57.05ms
+[2025-09-05 14:32:55] [Rank 0] step:2281/10000 train_time:129724ms step_avg:56.87ms
+[2025-09-05 14:32:56] [Rank 0] step:2301/10000 train_time:130466ms step_avg:56.70ms
+[2025-09-05 14:32:57] [Rank 0] step:2321/10000 train_time:131208ms step_avg:56.53ms
+[2025-09-05 14:32:58] [Rank 0] step:2341/10000 train_time:131950ms step_avg:56.36ms
+[2025-09-05 14:32:58] [Rank 0] step:2361/10000 train_time:132691ms step_avg:56.20ms
+[2025-09-05 14:32:59] [Rank 0] step:2381/10000 train_time:133433ms step_avg:56.04ms
+[2025-09-05 14:33:00] [Rank 0] step:2401/10000 train_time:134175ms step_avg:55.88ms
+[2025-09-05 14:33:01] [Rank 0] step:2421/10000 train_time:134917ms step_avg:55.73ms
+[2025-09-05 14:33:01] [Rank 0] step:2441/10000 train_time:135659ms step_avg:55.58ms
+[2025-09-05 14:33:02] [Rank 0] step:2461/10000 train_time:136402ms step_avg:55.43ms
+[2025-09-05 14:33:03] [Rank 0] step:2481/10000 train_time:137144ms step_avg:55.28ms
+[2025-09-05 14:33:03] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:33:04] [Rank 0] PRINT: step:2500/10000 train_loss:1.5347 val_loss:1.4897 train_time:137968ms step_avg:55.19ms
+[2025-09-05 14:33:04] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:33:04] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:34:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:34:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:34:26] [Rank 0] Total Loss: 4.3116
+[2025-09-05 14:34:26] [Rank 0] Total FTA (Unweighted): 0.4625
+[2025-09-05 14:34:26] [Rank 0] Total FTA (Weighted): 0.4625
+[2025-09-05 14:34:26] [Rank 0] Group 0 Loss: 3.4616
+[2025-09-05 14:34:26] [Rank 0] Group 1 Loss: 3.3659
+[2025-09-05 14:34:26] [Rank 0] Group 2 Loss: 3.2313
+[2025-09-05 14:34:26] [Rank 0] Group 3 Loss: 3.6580
+[2025-09-05 14:34:26] [Rank 0] Group 4 Loss: 3.7994
+[2025-09-05 14:34:26] [Rank 0] Group 5 Loss: 4.0129
+[2025-09-05 14:34:26] [Rank 0] Group 6 Loss: 4.0617
+[2025-09-05 14:34:26] [Rank 0] Group 7 Loss: 4.3312
+[2025-09-05 14:34:26] [Rank 0] Group 8 Loss: 4.6211
+[2025-09-05 14:34:26] [Rank 0] Group 9 Loss: 4.7436
+[2025-09-05 14:34:26] [Rank 0] Group 10 Loss: 4.8994
+[2025-09-05 14:34:26] [Rank 0] Group 11 Loss: 4.9501
+[2025-09-05 14:34:26] [Rank 0] Group 12 Loss: 4.8998
+[2025-09-05 14:34:26] [Rank 0] Group 13 Loss: 4.9683
+[2025-09-05 14:34:26] [Rank 0] Group 14 Loss: 4.9821
+[2025-09-05 14:34:26] [Rank 0] Group 15 Loss: 5.0001
+[2025-09-05 14:34:26] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:34:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:34:26] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:34:27] [Rank 0] step:2501/10000 train_time:137978ms step_avg:55.17ms
+[2025-09-05 14:34:28] [Rank 0] step:2521/10000 train_time:138659ms step_avg:55.00ms
+[2025-09-05 14:34:29] [Rank 0] step:2541/10000 train_time:139400ms step_avg:54.86ms
+[2025-09-05 14:34:30] [Rank 0] step:2561/10000 train_time:140142ms step_avg:54.72ms
+[2025-09-05 14:34:30] [Rank 0] step:2581/10000 train_time:140884ms step_avg:54.58ms
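Editor's note: step_avg in these lines tracks train_time / step, a cumulative average over the whole run rather than a recent-window rate, so the slower early steps keep it well above the current marginal cost. A quick check with values copied from the lines above:

# (step, train_time in ms) pairs taken verbatim from the log
pairs = [(2501, 137978), (2521, 138659), (2581, 140884)]

for step, t in pairs:
    print(step, round(t / step, 2))  # 55.17, 55.0, 54.58 -> matches step_avg

# the marginal cost per step over the most recent stretch is much lower:
print(round((140884 - 138659) / (2581 - 2521), 1))  # ~37.1 ms/step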
+[2025-09-05 14:34:31] [Rank 0] step:2601/10000 train_time:141625ms step_avg:54.45ms
+[2025-09-05 14:34:32] [Rank 0] step:2621/10000 train_time:142367ms step_avg:54.32ms
+[2025-09-05 14:34:33] [Rank 0] step:2641/10000 train_time:143108ms step_avg:54.19ms
+[2025-09-05 14:34:33] [Rank 0] step:2661/10000 train_time:143849ms step_avg:54.06ms
+[2025-09-05 14:34:34] [Rank 0] step:2681/10000 train_time:144590ms step_avg:53.93ms
+[2025-09-05 14:34:35] [Rank 0] step:2701/10000 train_time:145332ms step_avg:53.81ms
+[2025-09-05 14:34:35] [Rank 0] step:2721/10000 train_time:146074ms step_avg:53.68ms
+[2025-09-05 14:34:36] [Rank 0] step:2741/10000 train_time:146816ms step_avg:53.56ms
+[2025-09-05 14:34:37] [Rank 0] step:2761/10000 train_time:147558ms step_avg:53.44ms
+[2025-09-05 14:34:38] [Rank 0] step:2781/10000 train_time:148300ms step_avg:53.33ms
+[2025-09-05 14:34:38] [Rank 0] step:2801/10000 train_time:149041ms step_avg:53.21ms
+[2025-09-05 14:34:40] [Rank 0] step:2821/10000 train_time:150407ms step_avg:53.32ms
+[2025-09-05 14:34:41] [Rank 0] step:2841/10000 train_time:151149ms step_avg:53.20ms
+[2025-09-05 14:34:41] [Rank 0] step:2861/10000 train_time:151891ms step_avg:53.09ms
+[2025-09-05 14:34:42] [Rank 0] step:2881/10000 train_time:152633ms step_avg:52.98ms
+[2025-09-05 14:34:43] [Rank 0] step:2901/10000 train_time:153375ms step_avg:52.87ms
+[2025-09-05 14:34:44] [Rank 0] step:2921/10000 train_time:154116ms step_avg:52.76ms
+[2025-09-05 14:34:44] [Rank 0] step:2941/10000 train_time:154857ms step_avg:52.65ms
+[2025-09-05 14:34:45] [Rank 0] step:2961/10000 train_time:155599ms step_avg:52.55ms
+[2025-09-05 14:34:46] [Rank 0] step:2981/10000 train_time:156340ms step_avg:52.45ms
+[2025-09-05 14:34:46] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:34:47] [Rank 0] PRINT: step:3000/10000 train_loss:1.4791 val_loss:1.4510 train_time:157162ms step_avg:52.39ms
+[2025-09-05 14:34:47] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:34:47] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:36:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:36:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:36:09] [Rank 0] Total Loss: 4.2945
+[2025-09-05 14:36:09] [Rank 0] Total FTA (Unweighted): 0.4844
+[2025-09-05 14:36:09] [Rank 0] Total FTA (Weighted): 0.4844
+[2025-09-05 14:36:09] [Rank 0] Group 0 Loss: 3.4318
+[2025-09-05 14:36:09] [Rank 0] Group 1 Loss: 3.4153
+[2025-09-05 14:36:09] [Rank 0] Group 2 Loss: 3.2665
+[2025-09-05 14:36:09] [Rank 0] Group 3 Loss: 3.5841
+[2025-09-05 14:36:09] [Rank 0] Group 4 Loss: 3.7707
+[2025-09-05 14:36:09] [Rank 0] Group 5 Loss: 4.0309
+[2025-09-05 14:36:09] [Rank 0] Group 6 Loss: 4.0577
+[2025-09-05 14:36:09] [Rank 0] Group 7 Loss: 4.2922
+[2025-09-05 14:36:09] [Rank 0] Group 8 Loss: 4.5874
+[2025-09-05 14:36:09] [Rank 0] Group 9 Loss: 4.7282
+[2025-09-05 14:36:09] [Rank 0] Group 10 Loss: 4.9072
+[2025-09-05 14:36:09] [Rank 0] Group 11 Loss: 4.9034
+[2025-09-05 14:36:09] [Rank 0] Group 12 Loss: 4.8650
+[2025-09-05 14:36:09] [Rank 0] Group 13 Loss: 4.9131
+[2025-09-05 14:36:09] [Rank 0] Group 14 Loss: 4.9690
+[2025-09-05 14:36:09] [Rank 0] Group 15 Loss: 4.9899
+[2025-09-05 14:36:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:36:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:36:09] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:36:09] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:36:09] [Rank 0] Group 4 FTA: 0.6000
+[2025-09-05 14:36:09] [Rank 0] Group 5 FTA: 0.5600
+[2025-09-05 14:36:09] [Rank 0] Group 6 FTA: 0.4700
+[2025-09-05 14:36:09] [Rank 0] Group 7 FTA: 0.4000
+[2025-09-05 14:36:09] [Rank 0] Group 8 FTA: 0.3800
+[2025-09-05 14:36:09] [Rank 0] Group 9 FTA: 0.2800
+[2025-09-05 14:36:09] [Rank 0] Group 10 FTA: 0.3000
+[2025-09-05 14:36:09] [Rank 0] Group 11 FTA: 0.2200
+[2025-09-05 14:36:09] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 14:36:09] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 14:36:09] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 14:36:09] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 14:36:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:36:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:36:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:36:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:36:11] [Rank 0] step:3001/10000 train_time:157173ms step_avg:52.37ms
+[2025-09-05 14:36:11] [Rank 0] step:3021/10000 train_time:157852ms step_avg:52.25ms
+[2025-09-05 14:36:12] [Rank 0] step:3041/10000 train_time:158594ms step_avg:52.15ms
+[2025-09-05 14:36:13] [Rank 0] step:3061/10000 train_time:159336ms step_avg:52.05ms
+[2025-09-05 14:36:14] [Rank 0] step:3081/10000 train_time:160078ms step_avg:51.96ms
+[2025-09-05 14:36:14] [Rank 0] step:3101/10000 train_time:160820ms step_avg:51.86ms
+[2025-09-05 14:36:15] [Rank 0] step:3121/10000 train_time:161562ms step_avg:51.77ms
+[2025-09-05 14:36:16] [Rank 0] step:3141/10000 train_time:162304ms step_avg:51.67ms
+[2025-09-05 14:36:17] [Rank 0] step:3161/10000 train_time:163046ms step_avg:51.58ms
+[2025-09-05 14:36:17] [Rank 0] step:3181/10000 train_time:163788ms step_avg:51.49ms
+[2025-09-05 14:36:18] [Rank 0] step:3201/10000 train_time:164530ms step_avg:51.40ms
+[2025-09-05 14:36:19] [Rank 0] step:3221/10000 train_time:165272ms step_avg:51.31ms
+[2025-09-05 14:36:19] [Rank 0] step:3241/10000 train_time:166014ms step_avg:51.22ms
+[2025-09-05 14:36:20] [Rank 0] step:3261/10000 train_time:166756ms step_avg:51.14ms
+[2025-09-05 14:36:21] [Rank 0] step:3281/10000 train_time:167498ms step_avg:51.05ms
+[2025-09-05 14:36:22] [Rank 0] step:3301/10000 train_time:168240ms step_avg:50.97ms
+[2025-09-05 14:36:22] [Rank 0] step:3321/10000 train_time:168982ms step_avg:50.88ms
+[2025-09-05 14:36:23] [Rank 0] step:3341/10000 train_time:169724ms step_avg:50.80ms
+[2025-09-05 14:36:24] [Rank 0] step:3361/10000 train_time:170466ms step_avg:50.72ms
+[2025-09-05 14:36:25] [Rank 0] step:3381/10000 train_time:171209ms step_avg:50.64ms
+[2025-09-05 14:36:25] [Rank 0] step:3401/10000 train_time:171953ms step_avg:50.56ms
+[2025-09-05 14:36:26] [Rank 0] step:3421/10000 train_time:172695ms step_avg:50.48ms
+[2025-09-05 14:36:27] [Rank 0] step:3441/10000 train_time:173437ms step_avg:50.40ms
+[2025-09-05 14:36:28] [Rank 0] step:3461/10000 train_time:174179ms step_avg:50.33ms
+[2025-09-05 14:36:28] [Rank 0] step:3481/10000 train_time:174922ms step_avg:50.25ms
+[2025-09-05 14:36:29] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:36:30] [Rank 0] PRINT: step:3500/10000 train_loss:1.4447 val_loss:1.4228 train_time:175745ms step_avg:50.21ms
+[2025-09-05 14:36:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:36:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:37:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:37:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:37:51] [Rank 0] Total Loss: 4.2185
+[2025-09-05 14:37:51] [Rank 0] Total FTA (Unweighted): 0.4988
+[2025-09-05 14:37:51] [Rank 0] Total FTA (Weighted): 0.4988
+[2025-09-05 14:37:51] [Rank 0] Group 0 Loss: 3.3318
+[2025-09-05 14:37:51] [Rank 0] Group 1 Loss: 3.3637
+[2025-09-05 14:37:51] [Rank 0] Group 2 Loss: 3.2297
+[2025-09-05 14:37:51] [Rank 0] Group 3 Loss: 3.6538
+[2025-09-05 14:37:51] [Rank 0] Group 4 Loss: 3.7116
+[2025-09-05 14:37:51] [Rank 0] Group 5 Loss: 3.9035
+[2025-09-05 14:37:51] [Rank 0] Group 6 Loss: 3.9679
+[2025-09-05 14:37:51] [Rank 0] Group 7 Loss: 4.1884
+[2025-09-05 14:37:51] [Rank 0] Group 8 Loss: 4.4984
+[2025-09-05 14:37:51] [Rank 0] Group 9 Loss: 4.6273
+[2025-09-05 14:37:51] [Rank 0] Group 10 Loss: 4.7882
+[2025-09-05 14:37:51] [Rank 0] Group 11 Loss: 4.8185
+[2025-09-05 14:37:51] [Rank 0] Group 12 Loss: 4.7930
+[2025-09-05 14:37:51] [Rank 0] Group 13 Loss: 4.8589
+[2025-09-05 14:37:51] [Rank 0] Group 14 Loss: 4.8658
+[2025-09-05 14:37:51] [Rank 0] Group 15 Loss: 4.8948
+[2025-09-05 14:37:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:37:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:37:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:37:51] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:37:51] [Rank 0] Group 4 FTA: 0.6500
+[2025-09-05 14:37:51] [Rank 0] Group 5 FTA: 0.5600
+[2025-09-05 14:37:51] [Rank 0] Group 6 FTA: 0.4700
+[2025-09-05 14:37:51] [Rank 0] Group 7 FTA: 0.4400
+[2025-09-05 14:37:51] [Rank 0] Group 8 FTA: 0.4300
+[2025-09-05 14:37:51] [Rank 0] Group 9 FTA: 0.3300
+[2025-09-05 14:37:51] [Rank 0] Group 10 FTA: 0.3600
+[2025-09-05 14:37:51] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 14:37:51] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 14:37:51] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 14:37:51] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 14:37:51] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 14:37:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:37:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:37:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:37:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:37:52] [Rank 0] step:3501/10000 train_time:175754ms step_avg:50.20ms
+[2025-09-05 14:37:53] [Rank 0] step:3521/10000 train_time:176431ms step_avg:50.11ms
+[2025-09-05 14:37:54] [Rank 0] step:3541/10000 train_time:177177ms step_avg:50.04ms
+[2025-09-05 14:37:55] [Rank 0] step:3561/10000 train_time:177918ms step_avg:49.96ms
+[2025-09-05 14:37:55] [Rank 0] step:3581/10000 train_time:178659ms step_avg:49.89ms
+[2025-09-05 14:37:56] [Rank 0] step:3601/10000 train_time:179402ms step_avg:49.82ms
+[2025-09-05 14:37:57] [Rank 0] step:3621/10000 train_time:180144ms step_avg:49.75ms
+[2025-09-05 14:37:58] [Rank 0] step:3641/10000 train_time:181485ms step_avg:49.84ms
+[2025-09-05 14:37:59] [Rank 0] step:3661/10000 train_time:182226ms step_avg:49.78ms
+[2025-09-05 14:38:00] [Rank 0] step:3681/10000 train_time:182968ms step_avg:49.71ms
+[2025-09-05 14:38:00] [Rank 0] step:3701/10000 train_time:183710ms step_avg:49.64ms
+[2025-09-05 14:38:01] [Rank 0] step:3721/10000 train_time:184451ms step_avg:49.57ms
+[2025-09-05 14:38:02] [Rank 0] step:3741/10000 train_time:185194ms step_avg:49.50ms
+[2025-09-05 14:38:03] [Rank 0] step:3761/10000 train_time:185936ms step_avg:49.44ms
+[2025-09-05 14:38:03] [Rank 0] step:3781/10000 train_time:186679ms step_avg:49.37ms
+[2025-09-05 14:38:04] [Rank 0] step:3801/10000 train_time:187421ms step_avg:49.31ms
+[2025-09-05 14:38:05] [Rank 0] step:3821/10000 train_time:188163ms step_avg:49.24ms
+[2025-09-05 14:38:06] [Rank 0] step:3841/10000 train_time:188905ms step_avg:49.18ms
+[2025-09-05 14:38:06] [Rank 0] step:3861/10000 train_time:189647ms step_avg:49.12ms
+[2025-09-05 14:38:07] [Rank 0] step:3881/10000 train_time:190505ms step_avg:49.09ms
+[2025-09-05 14:38:08] [Rank 0] step:3901/10000 train_time:191247ms step_avg:49.03ms
+[2025-09-05 14:38:09] [Rank 0] step:3921/10000 train_time:191989ms step_avg:48.96ms
+[2025-09-05 14:38:10] [Rank 0] step:3941/10000 train_time:192868ms step_avg:48.94ms
+[2025-09-05 14:38:10] [Rank 0] step:3961/10000 train_time:193610ms step_avg:48.88ms
+[2025-09-05 14:38:11] [Rank 0] step:3981/10000 train_time:194352ms step_avg:48.82ms
+[2025-09-05 14:38:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:38:12] [Rank 0] PRINT: step:4000/10000 train_loss:1.4242 val_loss:1.4106 train_time:195176ms step_avg:48.79ms
+[2025-09-05 14:38:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:38:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:39:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:39:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:39:33] [Rank 0] Total Loss: 4.2326
+[2025-09-05 14:39:33] [Rank 0] Total FTA (Unweighted): 0.5181
+[2025-09-05 14:39:33] [Rank 0] Total FTA (Weighted): 0.5181
+[2025-09-05 14:39:33] [Rank 0] Group 0 Loss: 3.4104
+[2025-09-05 14:39:33] [Rank 0] Group 1 Loss: 3.3190
+[2025-09-05 14:39:33] [Rank 0] Group 2 Loss: 3.1962
+[2025-09-05 14:39:33] [Rank 0] Group 3 Loss: 3.6837
+[2025-09-05 14:39:33] [Rank 0] Group 4 Loss: 3.7387
+[2025-09-05 14:39:33] [Rank 0] Group 5 Loss: 3.9875
+[2025-09-05 14:39:33] [Rank 0] Group 6 Loss: 3.9482
+[2025-09-05 14:39:33] [Rank 0] Group 7 Loss: 4.2132
+[2025-09-05 14:39:33] [Rank 0] Group 8 Loss: 4.5443
+[2025-09-05 14:39:33] [Rank 0] Group 9 Loss: 4.6235
+[2025-09-05 14:39:33] [Rank 0] Group 10 Loss: 4.7875
+[2025-09-05 14:39:33] [Rank 0] Group 11 Loss: 4.8413
+[2025-09-05 14:39:33] [Rank 0] Group 12 Loss: 4.7660
+[2025-09-05 14:39:33] [Rank 0] Group 13 Loss: 4.8638
+[2025-09-05 14:39:33] [Rank 0] Group 14 Loss: 4.8744
+[2025-09-05 14:39:33] [Rank 0] Group 15 Loss: 4.9238
+[2025-09-05 14:39:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:39:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:39:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:39:33] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:39:33] [Rank 0] Group 4 FTA: 0.7500
+[2025-09-05 14:39:33] [Rank 0] Group 5 FTA: 0.5600
+[2025-09-05 14:39:33] [Rank 0] Group 6 FTA: 0.4800
+[2025-09-05 14:39:33] [Rank 0] Group 7 FTA: 0.4400
+[2025-09-05 14:39:33] [Rank 0] Group 8 FTA: 0.4200
+[2025-09-05 14:39:33] [Rank 0] Group 9 FTA: 0.3500
+[2025-09-05 14:39:33] [Rank 0] Group 10 FTA: 0.4100
+[2025-09-05 14:39:33] [Rank 0] Group 11 FTA: 0.2400
+[2025-09-05 14:39:33] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 14:39:33] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 14:39:33] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 14:39:33] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 14:39:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:39:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:39:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:39:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:39:35] [Rank 0] step:4001/10000 train_time:195184ms step_avg:48.78ms
+[2025-09-05 14:39:36] [Rank 0] step:4021/10000 train_time:196483ms step_avg:48.86ms
+[2025-09-05 14:39:37] [Rank 0] step:4041/10000 train_time:197224ms step_avg:48.81ms
+[2025-09-05 14:39:38] [Rank 0] step:4061/10000 train_time:197966ms step_avg:48.75ms
+[2025-09-05 14:39:38] [Rank 0] step:4081/10000 train_time:198708ms step_avg:48.69ms
+[2025-09-05 14:39:39] [Rank 0] step:4101/10000 train_time:199449ms step_avg:48.63ms
+[2025-09-05 14:39:40] [Rank 0] step:4121/10000 train_time:200191ms step_avg:48.58ms
+[2025-09-05 14:39:41] [Rank 0] step:4141/10000 train_time:200933ms step_avg:48.52ms
+[2025-09-05 14:39:41] [Rank 0] step:4161/10000 train_time:201675ms step_avg:48.47ms
+[2025-09-05 14:39:42] [Rank 0] step:4181/10000 train_time:202417ms step_avg:48.41ms
+[2025-09-05 14:39:43] [Rank 0] step:4201/10000 train_time:203158ms step_avg:48.36ms
+[2025-09-05 14:39:44] [Rank 0] step:4221/10000 train_time:203900ms step_avg:48.31ms
+[2025-09-05 14:39:44] [Rank 0] step:4241/10000 train_time:204641ms step_avg:48.25ms
+[2025-09-05 14:39:45] [Rank 0] step:4261/10000 train_time:205383ms step_avg:48.20ms
+[2025-09-05 14:39:46] [Rank 0] step:4281/10000 train_time:206125ms step_avg:48.15ms
+[2025-09-05 14:39:47] [Rank 0] step:4301/10000 train_time:206866ms step_avg:48.10ms
+[2025-09-05 14:39:47] [Rank 0] step:4321/10000 train_time:207608ms step_avg:48.05ms
+[2025-09-05 14:39:48] [Rank 0] step:4341/10000 train_time:208350ms step_avg:48.00ms
+[2025-09-05 14:39:49] [Rank 0] step:4361/10000 train_time:209092ms step_avg:47.95ms
+[2025-09-05 14:39:50] [Rank 0] step:4381/10000 train_time:209833ms step_avg:47.90ms
+[2025-09-05 14:39:50] [Rank 0] step:4401/10000 train_time:210575ms step_avg:47.85ms
+[2025-09-05 14:39:51] [Rank 0] step:4421/10000 train_time:211316ms step_avg:47.80ms
+[2025-09-05 14:39:52] [Rank 0] step:4441/10000 train_time:212058ms step_avg:47.75ms
+[2025-09-05 14:39:52] [Rank 0] step:4461/10000 train_time:212799ms step_avg:47.70ms
+[2025-09-05 14:39:53] [Rank 0] step:4481/10000 train_time:213541ms step_avg:47.65ms
+[2025-09-05 14:39:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:39:54] [Rank 0] PRINT: step:4500/10000 train_loss:1.4160 val_loss:1.4025 train_time:214364ms step_avg:47.64ms
+[2025-09-05 14:39:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:39:55] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:41:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:41:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:41:16] [Rank 0] Total Loss: 4.2586
+[2025-09-05 14:41:16] [Rank 0] Total FTA (Unweighted): 0.5194
+[2025-09-05 14:41:16] [Rank 0] Total FTA (Weighted): 0.5194
+[2025-09-05 14:41:16] [Rank 0] Group 0 Loss: 3.5351
+[2025-09-05 14:41:16] [Rank 0] Group 1 Loss: 3.4490
+[2025-09-05 14:41:16] [Rank 0] Group 2 Loss: 3.2209
+[2025-09-05 14:41:16] [Rank 0] Group 3 Loss: 3.5999
+[2025-09-05 14:41:16] [Rank 0] Group 4 Loss: 3.8538
+[2025-09-05 14:41:16] [Rank 0] Group 5 Loss: 3.9773
+[2025-09-05 14:41:16] [Rank 0] Group 6 Loss: 4.0049
+[2025-09-05 14:41:16] [Rank 0] Group 7 Loss: 4.2134
+[2025-09-05 14:41:16] [Rank 0] Group 8 Loss: 4.5239
+[2025-09-05 14:41:16] [Rank 0] Group 9 Loss: 4.6709
+[2025-09-05 14:41:16] [Rank 0] Group 10 Loss: 4.8226
+[2025-09-05 14:41:16] [Rank 0] Group 11 Loss: 4.7989
+[2025-09-05 14:41:16] [Rank 0] Group 12 Loss: 4.7904
+[2025-09-05 14:41:16] [Rank 0] Group 13 Loss: 4.8684
+[2025-09-05 14:41:16] [Rank 0] Group 14 Loss: 4.8584
+[2025-09-05 14:41:16] [Rank 0] Group 15 Loss: 4.9496
+[2025-09-05 14:41:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:41:16] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:41:16] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:41:16] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:41:16] [Rank 0] Group 4 FTA: 0.7300
+[2025-09-05 14:41:16] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 14:41:16] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 14:41:16] [Rank 0] Group 7 FTA: 0.4400
+[2025-09-05 14:41:16] [Rank 0] Group 8 FTA: 0.4400
+[2025-09-05 14:41:16] [Rank 0] Group 9 FTA: 0.3700
+[2025-09-05 14:41:16] [Rank 0] Group 10 FTA: 0.4300
+[2025-09-05 14:41:16] [Rank 0] Group 11 FTA: 0.2700
+[2025-09-05 14:41:16] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 14:41:16] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 14:41:16] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 14:41:16] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 14:41:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:41:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:41:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:41:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:41:17] [Rank 0] step:4501/10000 train_time:214373ms step_avg:47.63ms
+[2025-09-05 14:41:18] [Rank 0] step:4521/10000 train_time:215048ms step_avg:47.57ms
+[2025-09-05 14:41:19] [Rank 0] step:4541/10000 train_time:215790ms step_avg:47.52ms
+[2025-09-05 14:41:20] [Rank 0] step:4561/10000 train_time:216747ms step_avg:47.52ms
+[2025-09-05 14:41:20] [Rank 0] step:4581/10000 train_time:217489ms step_avg:47.48ms
+[2025-09-05 14:41:21] [Rank 0] step:4601/10000 train_time:218231ms step_avg:47.43ms
+[2025-09-05 14:41:22] [Rank 0] step:4621/10000 train_time:218980ms step_avg:47.39ms
+[2025-09-05 14:41:23] [Rank 0] step:4641/10000 train_time:219721ms step_avg:47.34ms
+[2025-09-05 14:41:23] [Rank 0] step:4661/10000 train_time:220463ms step_avg:47.30ms
+[2025-09-05 14:41:24] [Rank 0] step:4681/10000 train_time:221204ms step_avg:47.26ms
+[2025-09-05 14:41:25] [Rank 0] step:4701/10000 train_time:221947ms step_avg:47.21ms
+[2025-09-05 14:41:26] [Rank 0] step:4721/10000 train_time:222688ms step_avg:47.17ms
+[2025-09-05 14:41:26] [Rank 0] step:4741/10000 train_time:223430ms step_avg:47.13ms
+[2025-09-05 14:41:27] [Rank 0] step:4761/10000 train_time:224171ms step_avg:47.08ms
+[2025-09-05 14:41:28] [Rank 0] step:4781/10000 train_time:224914ms step_avg:47.04ms
+[2025-09-05 14:41:29] [Rank 0] step:4801/10000 train_time:225655ms step_avg:47.00ms
+[2025-09-05 14:41:29] [Rank 0] step:4821/10000 train_time:226397ms step_avg:46.96ms
+[2025-09-05 14:41:30] [Rank 0] step:4841/10000 train_time:227448ms step_avg:46.98ms
+[2025-09-05 14:41:31] [Rank 0] step:4861/10000 train_time:228190ms step_avg:46.94ms
+[2025-09-05 14:41:32] [Rank 0] step:4881/10000 train_time:228932ms step_avg:46.90ms
+[2025-09-05 14:41:33] [Rank 0] step:4901/10000 train_time:229674ms step_avg:46.86ms
+[2025-09-05 14:41:33] [Rank 0] step:4921/10000 train_time:230415ms step_avg:46.82ms
+[2025-09-05 14:41:34] [Rank 0] step:4941/10000 train_time:231157ms step_avg:46.78ms
+[2025-09-05 14:41:35] [Rank 0] step:4961/10000 train_time:231899ms step_avg:46.74ms
+[2025-09-05 14:41:36] [Rank 0] step:4981/10000 train_time:232641ms step_avg:46.71ms
+[2025-09-05 14:41:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:41:37] [Rank 0] PRINT: step:5000/10000 train_loss:1.4096 val_loss:1.3992 train_time:233463ms step_avg:46.69ms
+[2025-09-05 14:41:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:41:37] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:42:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:42:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:42:58] [Rank 0] Total Loss: 4.2959
+[2025-09-05 14:42:58] [Rank 0] Total FTA (Unweighted): 0.5231
+[2025-09-05 14:42:58] [Rank 0] Total FTA (Weighted): 0.5231
+[2025-09-05 14:42:58] [Rank 0] Group 0 Loss: 3.4296
+[2025-09-05 14:42:58] [Rank 0] Group 1 Loss: 3.4629
+[2025-09-05 14:42:58] [Rank 0] Group 2 Loss: 3.2777
+[2025-09-05 14:42:58] [Rank 0] Group 3 Loss: 3.7370
+[2025-09-05 14:42:58] [Rank 0] Group 4 Loss: 3.8707
+[2025-09-05 14:42:58] [Rank 0] Group 5 Loss: 4.0459
+[2025-09-05 14:42:58] [Rank 0] Group 6 Loss: 4.0486
+[2025-09-05 14:42:58] [Rank 0] Group 7 Loss: 4.2802
+[2025-09-05 14:42:58] [Rank 0] Group 8 Loss: 4.5655
+[2025-09-05 14:42:58] [Rank 0] Group 9 Loss: 4.6776
+[2025-09-05 14:42:58] [Rank 0] Group 10 Loss: 4.8645
+[2025-09-05 14:42:58] [Rank 0] Group 11 Loss: 4.8603
+[2025-09-05 14:42:58] [Rank 0] Group 12 Loss: 4.8439
+[2025-09-05 14:42:58] [Rank 0] Group 13 Loss: 4.8791
+[2025-09-05 14:42:58] [Rank 0] Group 14 Loss: 4.9281
+[2025-09-05 14:42:58] [Rank 0] Group 15 Loss: 4.9624
+[2025-09-05 14:42:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:42:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:42:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:42:58] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:42:58] [Rank 0] Group 4 FTA: 0.7400
+[2025-09-05 14:42:58] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 14:42:58] [Rank 0] Group 6 FTA: 0.4900
+[2025-09-05 14:42:58] [Rank 0] Group 7 FTA: 0.4400
+[2025-09-05 14:42:58] [Rank 0] Group 8 FTA: 0.4400
+[2025-09-05 14:42:58] [Rank 0] Group 9 FTA: 0.3600
+[2025-09-05 14:42:58] [Rank 0] Group 10 FTA: 0.4700
+[2025-09-05 14:42:58] [Rank 0] Group 11 FTA: 0.2600
+[2025-09-05 14:42:58] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 14:42:58] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 14:42:58] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 14:42:58] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 14:42:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:42:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:42:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:43:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:43:00] [Rank 0] step:5001/10000 train_time:233473ms step_avg:46.69ms
+[2025-09-05 14:43:00] [Rank 0] step:5021/10000 train_time:234153ms step_avg:46.63ms
+[2025-09-05 14:43:01] [Rank 0] step:5041/10000 train_time:234895ms step_avg:46.60ms
+[2025-09-05 14:43:02] [Rank 0] step:5061/10000 train_time:235638ms step_avg:46.56ms
+[2025-09-05 14:43:03] [Rank 0] step:5081/10000 train_time:236380ms step_avg:46.52ms
+[2025-09-05 14:43:03] [Rank 0] step:5101/10000 train_time:237121ms step_avg:46.49ms
+[2025-09-05 14:43:04] [Rank 0] step:5121/10000 train_time:237863ms step_avg:46.45ms
+[2025-09-05 14:43:05] [Rank 0] step:5141/10000 train_time:238605ms step_avg:46.41ms
+[2025-09-05 14:43:06] [Rank 0] step:5161/10000 train_time:239347ms step_avg:46.38ms
+[2025-09-05 14:43:06] [Rank 0] step:5181/10000 train_time:240089ms step_avg:46.34ms
+[2025-09-05 14:43:07] [Rank 0] step:5201/10000 train_time:240831ms step_avg:46.30ms
+[2025-09-05 14:43:08] [Rank 0] step:5221/10000 train_time:241573ms step_avg:46.27ms
+[2025-09-05 14:43:09] [Rank 0] step:5241/10000 train_time:242314ms step_avg:46.23ms
+[2025-09-05 14:43:09] [Rank 0] step:5261/10000 train_time:243056ms step_avg:46.20ms
+[2025-09-05 14:43:10] [Rank 0] step:5281/10000 train_time:243798ms step_avg:46.17ms
+[2025-09-05 14:43:11] [Rank 0] step:5301/10000 train_time:244540ms step_avg:46.13ms
+[2025-09-05 14:43:12] [Rank 0] step:5321/10000 train_time:245281ms step_avg:46.10ms
+[2025-09-05 14:43:12] [Rank 0] step:5341/10000 train_time:246023ms step_avg:46.06ms
+[2025-09-05 14:43:13] [Rank 0] step:5361/10000 train_time:246765ms step_avg:46.03ms
+[2025-09-05 14:43:14] [Rank 0] step:5381/10000 train_time:247507ms step_avg:46.00ms
+[2025-09-05 14:43:15] [Rank 0] step:5401/10000 train_time:248248ms step_avg:45.96ms
+[2025-09-05 14:43:15] [Rank 0] step:5421/10000 train_time:248990ms step_avg:45.93ms
+[2025-09-05 14:43:16] [Rank 0] step:5441/10000 train_time:249732ms step_avg:45.90ms
+[2025-09-05 14:43:17] [Rank 0] step:5461/10000 train_time:250474ms step_avg:45.87ms
+[2025-09-05 14:43:17] [Rank 0] step:5481/10000 train_time:251216ms step_avg:45.83ms
+[2025-09-05 14:43:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:43:19] [Rank 0] PRINT: step:5500/10000 train_loss:1.4067 val_loss:1.3973 train_time:252040ms step_avg:45.83ms
+[2025-09-05 14:43:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:43:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:44:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:44:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:44:40] [Rank 0] Total Loss: 4.3793
+[2025-09-05 14:44:40] [Rank 0] Total FTA (Unweighted): 0.5381
+[2025-09-05 14:44:40] [Rank 0] Total FTA (Weighted): 0.5381
+[2025-09-05 14:44:40] [Rank 0] Group 0 Loss: 3.5351
+[2025-09-05 14:44:40] [Rank 0] Group 1 Loss: 3.4421
+[2025-09-05 14:44:40] [Rank 0] Group 2 Loss: 3.4033
+[2025-09-05 14:44:40] [Rank 0] Group 3 Loss: 3.7582
+[2025-09-05 14:44:40] [Rank 0] Group 4 Loss: 3.9681
+[2025-09-05 14:44:40] [Rank 0] Group 5 Loss: 4.1613
+[2025-09-05 14:44:40] [Rank 0] Group 6 Loss: 4.1770
+[2025-09-05 14:44:40] [Rank 0] Group 7 Loss: 4.3294
+[2025-09-05 14:44:40] [Rank 0] Group 8 Loss: 4.6800
+[2025-09-05 14:44:40] [Rank 0] Group 9 Loss: 4.7920
+[2025-09-05 14:44:40] [Rank 0] Group 10 Loss: 5.0050
+[2025-09-05 14:44:40] [Rank 0] Group 11 Loss: 4.9524
+[2025-09-05 14:44:40] [Rank 0] Group 12 Loss: 4.8801
+[2025-09-05 14:44:40] [Rank 0] Group 13 Loss: 4.9404
+[2025-09-05 14:44:40] [Rank 0] Group 14 Loss: 5.0050
+[2025-09-05 14:44:40] [Rank 0] Group 15 Loss: 5.0401
+[2025-09-05 14:44:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:44:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:44:40] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:44:40] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:44:40] [Rank 0] Group 4 FTA: 0.8200
+[2025-09-05 14:44:40] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 14:44:40] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:44:40] [Rank 0] Group 7 FTA: 0.4700
+[2025-09-05 14:44:40] [Rank 0] Group 8 FTA: 0.4500
+[2025-09-05 14:44:40] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 14:44:40] [Rank 0] Group 10 FTA: 0.4600
+[2025-09-05 14:44:40] [Rank 0] Group 11 FTA: 0.3000
+[2025-09-05 14:44:40] [Rank 0] Group 12 FTA: 0.2300
+[2025-09-05 14:44:40] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 14:44:40] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 14:44:40] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 14:44:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:44:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:44:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:44:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:44:41] [Rank 0] step:5501/10000 train_time:252049ms step_avg:45.82ms
+[2025-09-05 14:44:42] [Rank 0] step:5521/10000 train_time:252723ms step_avg:45.77ms
+[2025-09-05 14:44:43] [Rank 0] step:5541/10000 train_time:253464ms step_avg:45.74ms
+[2025-09-05 14:44:44] [Rank 0] step:5561/10000 train_time:254206ms step_avg:45.71ms
+[2025-09-05 14:44:44] [Rank 0] step:5581/10000 train_time:254948ms step_avg:45.68ms
+[2025-09-05 14:44:45] [Rank 0] step:5601/10000 train_time:255690ms step_avg:45.65ms
+[2025-09-05 14:44:46] [Rank 0] step:5621/10000 train_time:256435ms step_avg:45.62ms
+[2025-09-05 14:44:47] [Rank 0] step:5641/10000 train_time:257789ms step_avg:45.70ms
+[2025-09-05 14:44:48] [Rank 0] step:5661/10000 train_time:258531ms step_avg:45.67ms
+[2025-09-05 14:44:49] [Rank 0] step:5681/10000 train_time:259273ms step_avg:45.64ms
+[2025-09-05 14:44:49] [Rank 0] step:5701/10000 train_time:260015ms step_avg:45.61ms
+[2025-09-05 14:44:50] [Rank 0] step:5721/10000 train_time:260756ms step_avg:45.58ms
+[2025-09-05 14:44:51] [Rank 0] step:5741/10000 train_time:261498ms step_avg:45.55ms
+[2025-09-05 14:44:52] [Rank 0] step:5761/10000 train_time:262241ms step_avg:45.52ms
+[2025-09-05 14:44:52] [Rank 0] step:5781/10000 train_time:262982ms step_avg:45.49ms
+[2025-09-05 14:44:53] [Rank 0] step:5801/10000 train_time:263724ms step_avg:45.46ms
+[2025-09-05 14:44:54] [Rank 0] step:5821/10000 train_time:264466ms step_avg:45.43ms
+[2025-09-05 14:44:55] [Rank 0] step:5841/10000 train_time:265208ms step_avg:45.40ms
+[2025-09-05 14:44:55] [Rank 0] step:5861/10000 train_time:265950ms step_avg:45.38ms
+[2025-09-05 14:44:56] [Rank 0] step:5881/10000 train_time:266692ms step_avg:45.35ms
+[2025-09-05 14:44:57] [Rank 0] step:5901/10000 train_time:267434ms step_avg:45.32ms
+[2025-09-05 14:44:58] [Rank 0] step:5921/10000 train_time:268176ms step_avg:45.29ms
+[2025-09-05 14:44:58] [Rank 0] step:5941/10000 train_time:268918ms step_avg:45.26ms
+[2025-09-05 14:44:59] [Rank 0] step:5961/10000 train_time:269659ms step_avg:45.24ms
+[2025-09-05 14:45:00] [Rank 0] step:5981/10000 train_time:270402ms step_avg:45.21ms
+[2025-09-05 14:45:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:45:01] [Rank 0] PRINT: step:6000/10000 train_loss:1.4046 val_loss:1.3944 train_time:271225ms step_avg:45.20ms
+[2025-09-05 14:45:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:45:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:46:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:46:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:46:22] [Rank 0] Total Loss: 4.2265
+[2025-09-05 14:46:22] [Rank 0] Total FTA (Unweighted): 0.5519
+[2025-09-05 14:46:22] [Rank 0] Total FTA (Weighted): 0.5519
+[2025-09-05 14:46:22] [Rank 0] Group 0 Loss: 3.4188
+[2025-09-05 14:46:22] [Rank 0] Group 1 Loss: 3.3855
+[2025-09-05 14:46:22] [Rank 0] Group 2 Loss: 3.2525
+[2025-09-05 14:46:22] [Rank 0] Group 3 Loss: 3.6111
+[2025-09-05 14:46:22] [Rank 0] Group 4 Loss: 3.8598
+[2025-09-05 14:46:22] [Rank 0] Group 5 Loss: 3.9660
+[2025-09-05 14:46:22] [Rank 0] Group 6 Loss: 3.9810
+[2025-09-05 14:46:22] [Rank 0] Group 7 Loss: 4.1801
+[2025-09-05 14:46:22] [Rank 0] Group 8 Loss: 4.5068
+[2025-09-05 14:46:22] [Rank 0] Group 9 Loss: 4.6204
+[2025-09-05 14:46:22] [Rank 0] Group 10 Loss: 4.8050
+[2025-09-05 14:46:22] [Rank 0] Group 11 Loss: 4.7913
+[2025-09-05 14:46:22] [Rank 0] Group 12 Loss: 4.7441
+[2025-09-05 14:46:22] [Rank 0] Group 13 Loss: 4.7968
+[2025-09-05 14:46:22] [Rank 0] Group 14 Loss: 4.8146
+[2025-09-05 14:46:22] [Rank 0] Group 15 Loss: 4.8901
+[2025-09-05 14:46:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:46:22] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:46:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:46:22] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:46:22] [Rank 0] Group 4 FTA: 0.8300
+[2025-09-05 14:46:22] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 14:46:22] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:46:22] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 14:46:22] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 14:46:22] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 14:46:22] [Rank 0] Group 10 FTA: 0.4800
+[2025-09-05 14:46:22] [Rank 0] Group 11 FTA: 0.3400
+[2025-09-05 14:46:22] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 14:46:22] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 14:46:22] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 14:46:22] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 14:46:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:46:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:46:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:46:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:46:24] [Rank 0] step:6001/10000 train_time:271234ms step_avg:45.20ms
+[2025-09-05 14:46:25] [Rank 0] step:6021/10000 train_time:272502ms step_avg:45.26ms
+[2025-09-05 14:46:26] [Rank 0] step:6041/10000 train_time:273244ms step_avg:45.23ms
+[2025-09-05 14:46:27] [Rank 0] step:6061/10000 train_time:273985ms step_avg:45.20ms
+[2025-09-05 14:46:27] [Rank 0] step:6081/10000 train_time:274727ms step_avg:45.18ms
+[2025-09-05 14:46:28] [Rank 0] step:6101/10000 train_time:275469ms step_avg:45.15ms
+[2025-09-05 14:46:29] [Rank 0] step:6121/10000 train_time:276210ms step_avg:45.13ms
+[2025-09-05 14:46:30] [Rank 0] step:6141/10000 train_time:276952ms step_avg:45.10ms
+[2025-09-05 14:46:30] [Rank 0] step:6161/10000 train_time:277695ms step_avg:45.07ms
+[2025-09-05 14:46:31] [Rank 0] step:6181/10000 train_time:278437ms step_avg:45.05ms
+[2025-09-05 14:46:32] [Rank 0] step:6201/10000 train_time:279178ms step_avg:45.02ms
+[2025-09-05 14:46:33] [Rank 0] step:6221/10000 train_time:279921ms step_avg:45.00ms
+[2025-09-05 14:46:33] [Rank 0] step:6241/10000 train_time:280808ms step_avg:44.99ms
+[2025-09-05 14:46:34] [Rank 0] step:6261/10000 train_time:281550ms step_avg:44.97ms
+[2025-09-05 14:46:35] [Rank 0] step:6281/10000 train_time:282293ms step_avg:44.94ms
+[2025-09-05 14:46:36] [Rank 0] step:6301/10000 train_time:283174ms step_avg:44.94ms
+[2025-09-05 14:46:37] [Rank 0] step:6321/10000 train_time:283920ms step_avg:44.92ms
+[2025-09-05 14:46:37] [Rank 0] step:6341/10000 train_time:284662ms step_avg:44.89ms
+[2025-09-05 14:46:38] [Rank 0] step:6361/10000 train_time:285404ms step_avg:44.87ms
+[2025-09-05 14:46:39] [Rank 0] step:6381/10000 train_time:286146ms step_avg:44.84ms
+[2025-09-05 14:46:40] [Rank 0] step:6401/10000 train_time:286888ms step_avg:44.82ms
+[2025-09-05 14:46:40] [Rank 0] step:6421/10000 train_time:287631ms step_avg:44.80ms
+[2025-09-05 14:46:41] [Rank 0] step:6441/10000 train_time:288373ms step_avg:44.77ms
+[2025-09-05 14:46:42] [Rank 0] step:6461/10000 train_time:289115ms step_avg:44.75ms
+[2025-09-05 14:46:42] [Rank 0] step:6481/10000 train_time:289857ms step_avg:44.72ms
+[2025-09-05 14:46:43] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:46:44] [Rank 0] PRINT: step:6500/10000 train_loss:1.4034 val_loss:1.3943 train_time:290681ms step_avg:44.72ms
+[2025-09-05 14:46:44] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:46:44] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:48:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:48:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:48:05] [Rank 0] Total Loss: 4.2565
+[2025-09-05 14:48:05] [Rank 0] Total FTA (Unweighted): 0.5531
+[2025-09-05 14:48:05] [Rank 0] Total FTA (Weighted): 0.5531
+[2025-09-05 14:48:05] [Rank 0] Group 0 Loss: 3.5052
+[2025-09-05 14:48:05] [Rank 0] Group 1 Loss: 3.3971
+[2025-09-05 14:48:05] [Rank 0] Group 2 Loss: 3.3193
+[2025-09-05 14:48:05] [Rank 0] Group 3 Loss: 3.6820
+[2025-09-05 14:48:05] [Rank 0] Group 4 Loss: 3.8257
+[2025-09-05 14:48:05] [Rank 0] Group 5 Loss: 4.0163
+[2025-09-05 14:48:05] [Rank 0] Group 6 Loss: 4.0417
+[2025-09-05 14:48:05] [Rank 0] Group 7 Loss: 4.2057
+[2025-09-05 14:48:05] [Rank 0] Group 8 Loss: 4.5315
+[2025-09-05 14:48:05] [Rank 0] Group 9 Loss: 4.6448
+[2025-09-05 14:48:05] [Rank 0] Group 10 Loss: 4.8062
+[2025-09-05 14:48:05] [Rank 0] Group 11 Loss: 4.7918
+[2025-09-05 14:48:05] [Rank 0] Group 12 Loss: 4.7746
+[2025-09-05 14:48:05] [Rank 0] Group 13 Loss: 4.8161
+[2025-09-05 14:48:05] [Rank 0] Group 14 Loss: 4.8568
+[2025-09-05 14:48:05] [Rank 0] Group 15 Loss: 4.8892
+[2025-09-05 14:48:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:48:05] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:48:05] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:48:05] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:48:05] [Rank 0] Group 4 FTA: 0.8300
+[2025-09-05 14:48:05] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 14:48:05] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:48:05] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 14:48:05] [Rank 0] Group 8 FTA: 0.4700
+[2025-09-05 14:48:05] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 14:48:05] [Rank 0] Group 10 FTA: 0.4800
+[2025-09-05 14:48:05] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 14:48:05] [Rank 0] Group 12 FTA: 0.2600
+[2025-09-05 14:48:05] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 14:48:05] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 14:48:05] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 14:48:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:48:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:48:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:48:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:48:06] [Rank 0] step:6501/10000 train_time:290690ms step_avg:44.71ms
+[2025-09-05 14:48:07] [Rank 0] step:6521/10000 train_time:291360ms step_avg:44.68ms
+[2025-09-05 14:48:08] [Rank 0] step:6541/10000 train_time:292101ms step_avg:44.66ms
+[2025-09-05 14:48:08] [Rank 0] step:6561/10000 train_time:292844ms step_avg:44.63ms
+[2025-09-05 14:48:09] [Rank 0] step:6581/10000 train_time:293586ms step_avg:44.61ms
+[2025-09-05 14:48:10] [Rank 0] step:6601/10000 train_time:294328ms step_avg:44.59ms
+[2025-09-05 14:48:11] [Rank 0] step:6621/10000 train_time:295071ms step_avg:44.57ms
+[2025-09-05 14:48:11] [Rank 0] step:6641/10000 train_time:295814ms step_avg:44.54ms
+[2025-09-05 14:48:12] [Rank 0] step:6661/10000 train_time:296555ms step_avg:44.52ms
+[2025-09-05 14:48:13] [Rank 0] step:6681/10000 train_time:297297ms step_avg:44.50ms
+[2025-09-05 14:48:14] [Rank 0] step:6701/10000 train_time:298038ms step_avg:44.48ms
+[2025-09-05 14:48:14] [Rank 0] step:6721/10000 train_time:298780ms step_avg:44.45ms
+[2025-09-05 14:48:15] [Rank 0] step:6741/10000 train_time:299523ms step_avg:44.43ms
+[2025-09-05 14:48:16] [Rank 0] step:6761/10000 train_time:300265ms step_avg:44.41ms
+[2025-09-05 14:48:17] [Rank 0] step:6781/10000 train_time:301007ms step_avg:44.39ms
+[2025-09-05 14:48:17] [Rank 0] step:6801/10000 train_time:301749ms step_avg:44.37ms
+[2025-09-05 14:48:18] [Rank 0] step:6821/10000 train_time:302491ms step_avg:44.35ms
+[2025-09-05 14:48:19] [Rank 0] step:6841/10000 train_time:303854ms step_avg:44.42ms
+[2025-09-05 14:48:20] [Rank 0] step:6861/10000 train_time:304596ms step_avg:44.40ms
+[2025-09-05 14:48:21] [Rank 0] step:6881/10000 train_time:305339ms step_avg:44.37ms
+[2025-09-05 14:48:22] [Rank 0] step:6901/10000 train_time:306081ms step_avg:44.35ms
+[2025-09-05 14:48:22] [Rank 0] step:6921/10000 train_time:306823ms step_avg:44.33ms
+[2025-09-05 14:48:23] [Rank 0] step:6941/10000 train_time:307566ms step_avg:44.31ms
+[2025-09-05 14:48:24] [Rank 0] step:6961/10000 train_time:308308ms step_avg:44.29ms
+[2025-09-05 14:48:25] [Rank 0] step:6981/10000 train_time:309051ms step_avg:44.27ms
+[2025-09-05 14:48:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:48:26] [Rank 0] PRINT: step:7000/10000 train_loss:1.4025 val_loss:1.3943 train_time:309874ms step_avg:44.27ms
+[2025-09-05 14:48:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:48:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:49:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:49:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:49:47] [Rank 0] Total Loss: 4.1394
+[2025-09-05 14:49:47] [Rank 0] Total FTA (Unweighted): 0.5600
+[2025-09-05 14:49:47] [Rank 0] Total FTA (Weighted): 0.5600
+[2025-09-05 14:49:47] [Rank 0] Group 0 Loss: 3.4092
+[2025-09-05 14:49:47] [Rank 0] Group 1 Loss: 3.2027
+[2025-09-05 14:49:47] [Rank 0] Group 2 Loss: 3.2518
+[2025-09-05 14:49:47] [Rank 0] Group 3 Loss: 3.6059
+[2025-09-05 14:49:47] [Rank 0] Group 4 Loss: 3.7006
+[2025-09-05 14:49:47] [Rank 0] Group 5 Loss: 3.8608
+[2025-09-05 14:49:47] [Rank 0] Group 6 Loss: 3.9644
+[2025-09-05 14:49:47] [Rank 0] Group 7 Loss: 4.0891
+[2025-09-05 14:49:47] [Rank 0] Group 8 Loss: 4.4184
+[2025-09-05 14:49:47] [Rank 0] Group 9 Loss: 4.5309
+[2025-09-05 14:49:47] [Rank 0] Group 10 Loss: 4.6727
+[2025-09-05 14:49:47] [Rank 0] Group 11 Loss: 4.6551
+[2025-09-05 14:49:47] [Rank 0] Group 12 Loss: 4.6768
+[2025-09-05 14:49:48] [Rank 0] Group 13 Loss: 4.7117
+[2025-09-05 14:49:48] [Rank 0] Group 14 Loss: 4.7025
+[2025-09-05 14:49:48] [Rank 0] Group 15 Loss: 4.7786
+[2025-09-05 14:49:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:49:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:49:48] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:49:48] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:49:48] [Rank 0] Group 4 FTA: 0.8800
+[2025-09-05 14:49:48] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 14:49:48] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:49:48] [Rank 0] Group 7 FTA: 0.4900
+[2025-09-05 14:49:48] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 14:49:48] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 14:49:48] [Rank 0] Group 10 FTA: 0.5100
+[2025-09-05 14:49:48] [Rank 0] Group 11 FTA: 0.3500
+[2025-09-05 14:49:48] [Rank 0] Group 12 FTA: 0.2900
+[2025-09-05 14:49:48] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 14:49:48] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 14:49:48] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 14:49:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:49:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:49:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:49:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:49:49] [Rank 0] step:7001/10000 train_time:309883ms step_avg:44.26ms
+[2025-09-05 14:49:50] [Rank 0] step:7021/10000 train_time:310549ms step_avg:44.23ms
+[2025-09-05 14:49:51] [Rank 0] step:7041/10000 train_time:311291ms step_avg:44.21ms
+[2025-09-05 14:49:51] [Rank 0] step:7061/10000 train_time:312032ms step_avg:44.19ms
+[2025-09-05 14:49:52] [Rank 0] step:7081/10000 train_time:312773ms step_avg:44.17ms
+[2025-09-05 14:49:53] [Rank 0] step:7101/10000 train_time:313515ms step_avg:44.15ms
+[2025-09-05 14:49:54] [Rank 0] step:7121/10000 train_time:314257ms step_avg:44.13ms
+[2025-09-05 14:49:54] [Rank 0] step:7141/10000 train_time:314999ms step_avg:44.11ms
+[2025-09-05 14:49:55] [Rank 0] step:7161/10000 train_time:315741ms step_avg:44.09ms
+[2025-09-05 14:49:56] [Rank 0] step:7181/10000 train_time:316483ms step_avg:44.07ms
+[2025-09-05 14:49:56] [Rank 0] step:7201/10000 train_time:317226ms step_avg:44.05ms
+[2025-09-05 14:49:57] [Rank 0] step:7221/10000 train_time:317968ms step_avg:44.03ms
+[2025-09-05 14:49:58] [Rank 0] step:7241/10000 train_time:318710ms step_avg:44.01ms
+[2025-09-05 14:49:59] [Rank 0] step:7261/10000 train_time:319451ms step_avg:44.00ms
+[2025-09-05 14:49:59] [Rank 0] step:7281/10000 train_time:320193ms step_avg:43.98ms
+[2025-09-05 14:50:00] [Rank 0] step:7301/10000 train_time:320935ms step_avg:43.96ms
+[2025-09-05 14:50:01] [Rank 0] step:7321/10000 train_time:321677ms step_avg:43.94ms
+[2025-09-05 14:50:02] [Rank 0] step:7341/10000 train_time:322419ms step_avg:43.92ms
+[2025-09-05 14:50:02] [Rank 0] step:7361/10000 train_time:323174ms step_avg:43.90ms
+[2025-09-05 14:50:03] [Rank 0] step:7381/10000 train_time:323916ms step_avg:43.89ms
+[2025-09-05 14:50:04] [Rank 0] step:7401/10000 train_time:324658ms step_avg:43.87ms
+[2025-09-05 14:50:05] [Rank 0] step:7421/10000 train_time:325400ms step_avg:43.85ms
+[2025-09-05 14:50:05] [Rank 0] step:7441/10000 train_time:326142ms step_avg:43.83ms
+[2025-09-05 14:50:06] [Rank 0] step:7461/10000 train_time:326885ms step_avg:43.81ms
+[2025-09-05 14:50:07] [Rank 0] step:7481/10000 train_time:327627ms step_avg:43.79ms
+[2025-09-05 14:50:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:50:08] [Rank 0] PRINT: step:7500/10000 train_loss:1.4022 val_loss:1.3928 train_time:328450ms step_avg:43.79ms
+[2025-09-05 14:50:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:50:08] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:51:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:51:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:51:29] [Rank 0] Total Loss: 4.3328
+[2025-09-05 14:51:29] [Rank 0] Total FTA (Unweighted): 0.5637
+[2025-09-05 14:51:29] [Rank 0] Total FTA (Weighted): 0.5637
+[2025-09-05 14:51:29] [Rank 0] Group 0 Loss: 3.5777
+[2025-09-05 14:51:29] [Rank 0] Group 1 Loss: 3.5643
+[2025-09-05 14:51:29] [Rank 0] Group 2 Loss: 3.3262
+[2025-09-05 14:51:29] [Rank 0] Group 3 Loss: 3.7632
+[2025-09-05 14:51:29] [Rank 0] Group 4 Loss: 3.9017
+[2025-09-05 14:51:29] [Rank 0] Group 5 Loss: 4.0663
+[2025-09-05 14:51:29] [Rank 0] Group 6 Loss: 4.1311
+[2025-09-05 14:51:29] [Rank 0] Group 7 Loss: 4.3028
+[2025-09-05 14:51:29] [Rank 0] Group 8 Loss: 4.6530
+[2025-09-05 14:51:29] [Rank 0] Group 9 Loss: 4.7146
+[2025-09-05 14:51:29] [Rank 0] Group 10 Loss: 4.9109
+[2025-09-05 14:51:29] [Rank 0] Group 11 Loss: 4.8509
+[2025-09-05 14:51:29] [Rank 0] Group 12 Loss: 4.8518
+[2025-09-05 14:51:29] [Rank 0] Group 13 Loss: 4.8657
+[2025-09-05 14:51:29] [Rank 0] Group 14 Loss: 4.9254
+[2025-09-05 14:51:29] [Rank 0] Group 15 Loss: 4.9197
+[2025-09-05 14:51:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:51:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:51:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:51:29] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:51:29] [Rank 0] Group 4 FTA: 0.8700
+[2025-09-05 14:51:29] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 14:51:29] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:51:29] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 14:51:29] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 14:51:29] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 14:51:29] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 14:51:29] [Rank 0] Group 11 FTA: 0.3500
+[2025-09-05 14:51:29] [Rank 0] Group 12 FTA: 0.3300
+[2025-09-05 14:51:29] [Rank 0] Group 13 FTA: 0.1800
+[2025-09-05 14:51:29] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 14:51:29] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 14:51:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:51:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:51:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:51:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:51:30] [Rank 0] step:7501/10000 train_time:328459ms step_avg:43.79ms
+[2025-09-05 14:51:31] [Rank 0] step:7521/10000 train_time:329132ms step_avg:43.76ms
+[2025-09-05 14:51:32] [Rank 0] step:7541/10000 train_time:329874ms step_avg:43.74ms
+[2025-09-05 14:51:32] [Rank 0] step:7561/10000 train_time:330616ms step_avg:43.73ms
+[2025-09-05 14:51:33] [Rank 0] step:7581/10000 train_time:331358ms step_avg:43.71ms
+[2025-09-05 14:51:34] [Rank 0] step:7601/10000 train_time:332100ms step_avg:43.69ms
+[2025-09-05 14:51:35] [Rank 0] step:7621/10000 train_time:332842ms step_avg:43.67ms
+[2025-09-05 14:51:36] [Rank 0] step:7641/10000 train_time:333809ms step_avg:43.69ms
+[2025-09-05 14:51:37] [Rank 0] step:7661/10000 train_time:334949ms step_avg:43.72ms
+[2025-09-05 14:51:37] [Rank 0] step:7681/10000 train_time:335690ms step_avg:43.70ms
+[2025-09-05 14:51:38] [Rank 0] step:7701/10000 train_time:336432ms step_avg:43.69ms
+[2025-09-05 14:51:39] [Rank 0] step:7721/10000 train_time:337173ms step_avg:43.67ms
+[2025-09-05 14:51:40] [Rank 0] step:7741/10000 train_time:337915ms step_avg:43.65ms
+[2025-09-05 14:51:40] [Rank 0] step:7761/10000 train_time:338657ms step_avg:43.64ms
+[2025-09-05 14:51:41] [Rank 0] step:7781/10000 train_time:339399ms step_avg:43.62ms
+[2025-09-05 14:51:42] [Rank 0] step:7801/10000 train_time:340141ms step_avg:43.60ms
+[2025-09-05 14:51:43] [Rank 0] step:7821/10000 train_time:340884ms step_avg:43.59ms
+[2025-09-05 14:51:43] [Rank 0] step:7841/10000 train_time:341626ms step_avg:43.57ms
+[2025-09-05 14:51:44] [Rank 0] step:7861/10000 train_time:342369ms step_avg:43.55ms
+[2025-09-05 14:51:45] [Rank 0] step:7881/10000 train_time:343110ms step_avg:43.54ms
+[2025-09-05 14:51:46] [Rank 0] step:7901/10000 train_time:343852ms step_avg:43.52ms
+[2025-09-05 14:51:46] [Rank 0] step:7921/10000 train_time:344594ms step_avg:43.50ms
+[2025-09-05 14:51:47] [Rank 0] step:7941/10000 train_time:345336ms step_avg:43.49ms
+[2025-09-05 14:51:48] [Rank 0] step:7961/10000 train_time:346078ms step_avg:43.47ms
+[2025-09-05 14:51:49] [Rank 0] step:7981/10000 train_time:346821ms step_avg:43.46ms
+[2025-09-05 14:51:49] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:51:50] [Rank 0] PRINT: step:8000/10000 train_loss:1.4014 val_loss:1.3916 train_time:347744ms step_avg:43.47ms
+[2025-09-05 14:51:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:51:50] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:53:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:53:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:53:11] [Rank 0] Total Loss: 4.2055
+[2025-09-05 14:53:11] [Rank 0] Total FTA (Unweighted): 0.5700
+[2025-09-05 14:53:11] [Rank 0] Total FTA (Weighted): 0.5700
+[2025-09-05 14:53:11] [Rank 0] Group 0 Loss: 3.4376
+[2025-09-05 14:53:11] [Rank 0] Group 1 Loss: 3.3436
+[2025-09-05 14:53:11] [Rank 0] Group 2 Loss: 3.2804
+[2025-09-05 14:53:11] [Rank 0] Group 3 Loss: 3.6042
+[2025-09-05 14:53:11] [Rank 0] Group 4 Loss: 3.7975
+[2025-09-05 14:53:11] [Rank 0] Group 5 Loss: 3.9475
+[2025-09-05 14:53:11] [Rank 0] Group 6 Loss: 4.0159
+[2025-09-05 14:53:11] [Rank 0] Group 7 Loss: 4.1827
+[2025-09-05 14:53:11] [Rank 0] Group 8 Loss: 4.4943
+[2025-09-05 14:53:11] [Rank 0] Group 9 Loss: 4.5988
+[2025-09-05 14:53:11] [Rank 0] Group 10 Loss: 4.7390
+[2025-09-05 14:53:11] [Rank 0] Group 11 Loss: 4.7390
+[2025-09-05 14:53:11] [Rank 0] Group 12 Loss: 4.7354
+[2025-09-05 14:53:11] [Rank 0] Group 13 Loss: 4.7738
+[2025-09-05 14:53:11] [Rank 0] Group 14 Loss: 4.7761
+[2025-09-05 14:53:11] [Rank 0] Group 15 Loss: 4.8219
+[2025-09-05 14:53:11] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:53:11] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:53:11] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:53:11] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:53:11] [Rank 0] Group 4 FTA: 0.8700
+[2025-09-05 14:53:11] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 14:53:11] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:53:11] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 14:53:11] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 14:53:11] [Rank 0] Group 9 FTA: 0.4400
+[2025-09-05 14:53:11] [Rank 0] Group 10 FTA: 0.5100
+[2025-09-05 14:53:11] [Rank 0] Group 11 FTA: 0.3600
+[2025-09-05 14:53:11] [Rank 0] Group 12 FTA: 0.3600
+[2025-09-05 14:53:11] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 14:53:11] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 14:53:11] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 14:53:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:53:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:53:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:53:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:53:12] [Rank 0] step:8001/10000 train_time:347754ms step_avg:43.46ms
+[2025-09-05 14:53:14] [Rank 0] step:8021/10000 train_time:349057ms step_avg:43.52ms
+[2025-09-05 14:53:15] [Rank 0] step:8041/10000 train_time:349798ms step_avg:43.50ms
+[2025-09-05 14:53:15] [Rank 0] step:8061/10000 train_time:350540ms step_avg:43.49ms
+[2025-09-05 14:53:16] [Rank 0] step:8081/10000 train_time:351281ms step_avg:43.47ms
+[2025-09-05 14:53:17] [Rank 0] step:8101/10000 train_time:352024ms step_avg:43.45ms
+[2025-09-05 14:53:18] [Rank 0] step:8121/10000 train_time:352766ms step_avg:43.44ms
[Rank 0] step:8121/10000 train_time:352766ms step_avg:43.44ms +[2025-09-05 14:53:18] [Rank 0] step:8141/10000 train_time:353509ms step_avg:43.42ms +[2025-09-05 14:53:18] [Rank 0] step:8141/10000 train_time:353509ms step_avg:43.42ms +[2025-09-05 14:53:19] [Rank 0] step:8161/10000 train_time:354251ms step_avg:43.41ms +[2025-09-05 14:53:19] [Rank 0] step:8161/10000 train_time:354251ms step_avg:43.41ms +[2025-09-05 14:53:20] [Rank 0] step:8181/10000 train_time:354993ms step_avg:43.39ms +[2025-09-05 14:53:20] [Rank 0] step:8181/10000 train_time:354993ms step_avg:43.39ms +[2025-09-05 14:53:21] [Rank 0] step:8201/10000 train_time:355735ms step_avg:43.38ms +[2025-09-05 14:53:21] [Rank 0] step:8201/10000 train_time:355735ms step_avg:43.38ms +[2025-09-05 14:53:21] [Rank 0] step:8221/10000 train_time:356476ms step_avg:43.36ms +[2025-09-05 14:53:21] [Rank 0] step:8221/10000 train_time:356476ms step_avg:43.36ms +[2025-09-05 14:53:22] [Rank 0] step:8241/10000 train_time:357218ms step_avg:43.35ms +[2025-09-05 14:53:22] [Rank 0] step:8241/10000 train_time:357218ms step_avg:43.35ms +[2025-09-05 14:53:23] [Rank 0] step:8261/10000 train_time:357959ms step_avg:43.33ms +[2025-09-05 14:53:23] [Rank 0] step:8261/10000 train_time:357959ms step_avg:43.33ms +[2025-09-05 14:53:24] [Rank 0] step:8281/10000 train_time:358702ms step_avg:43.32ms +[2025-09-05 14:53:24] [Rank 0] step:8281/10000 train_time:358702ms step_avg:43.32ms +[2025-09-05 14:53:24] [Rank 0] step:8301/10000 train_time:359444ms step_avg:43.30ms +[2025-09-05 14:53:24] [Rank 0] step:8301/10000 train_time:359444ms step_avg:43.30ms +[2025-09-05 14:53:25] [Rank 0] step:8321/10000 train_time:360187ms step_avg:43.29ms +[2025-09-05 14:53:25] [Rank 0] step:8321/10000 train_time:360187ms step_avg:43.29ms +[2025-09-05 14:53:26] [Rank 0] step:8341/10000 train_time:360929ms step_avg:43.27ms +[2025-09-05 14:53:26] [Rank 0] step:8341/10000 train_time:360929ms step_avg:43.27ms +[2025-09-05 14:53:26] [Rank 0] step:8361/10000 train_time:361671ms step_avg:43.26ms +[2025-09-05 14:53:26] [Rank 0] step:8361/10000 train_time:361671ms step_avg:43.26ms +[2025-09-05 14:53:27] [Rank 0] step:8381/10000 train_time:362413ms step_avg:43.24ms +[2025-09-05 14:53:27] [Rank 0] step:8381/10000 train_time:362413ms step_avg:43.24ms +[2025-09-05 14:53:28] [Rank 0] step:8401/10000 train_time:363156ms step_avg:43.23ms +[2025-09-05 14:53:28] [Rank 0] step:8401/10000 train_time:363156ms step_avg:43.23ms +[2025-09-05 14:53:29] [Rank 0] step:8421/10000 train_time:363898ms step_avg:43.21ms +[2025-09-05 14:53:29] [Rank 0] step:8421/10000 train_time:363898ms step_avg:43.21ms +[2025-09-05 14:53:29] [Rank 0] step:8441/10000 train_time:364639ms step_avg:43.20ms +[2025-09-05 14:53:29] [Rank 0] step:8441/10000 train_time:364639ms step_avg:43.20ms +[2025-09-05 14:53:30] [Rank 0] step:8461/10000 train_time:365383ms step_avg:43.18ms +[2025-09-05 14:53:30] [Rank 0] step:8461/10000 train_time:365383ms step_avg:43.18ms +[2025-09-05 14:53:31] [Rank 0] step:8481/10000 train_time:366125ms step_avg:43.17ms +[2025-09-05 14:53:31] [Rank 0] step:8481/10000 train_time:366125ms step_avg:43.17ms +[2025-09-05 14:53:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 14:53:32] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 14:53:32] [Rank 0] PRINT: step:8500/10000 train_loss:1.3990 val_loss:1.3886 train_time:366948ms step_avg:43.17ms
+[2025-09-05 14:53:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:53:32] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:54:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:54:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:54:53] [Rank 0] Total Loss: 4.1947
+[2025-09-05 14:54:53] [Rank 0] Total FTA (Unweighted): 0.5725
+[2025-09-05 14:54:53] [Rank 0] Total FTA (Weighted): 0.5725
+[2025-09-05 14:54:53] [Rank 0] Group 0 Loss: 3.4450
+[2025-09-05 14:54:53] [Rank 0] Group 1 Loss: 3.3944
+[2025-09-05 14:54:53] [Rank 0] Group 2 Loss: 3.2425
+[2025-09-05 14:54:53] [Rank 0] Group 3 Loss: 3.6124
+[2025-09-05 14:54:53] [Rank 0] Group 4 Loss: 3.7684
+[2025-09-05 14:54:53] [Rank 0] Group 5 Loss: 3.9576
+[2025-09-05 14:54:53] [Rank 0] Group 6 Loss: 4.0306
+[2025-09-05 14:54:53] [Rank 0] Group 7 Loss: 4.1467
+[2025-09-05 14:54:53] [Rank 0] Group 8 Loss: 4.4786
+[2025-09-05 14:54:53] [Rank 0] Group 9 Loss: 4.5711
+[2025-09-05 14:54:53] [Rank 0] Group 10 Loss: 4.7599
+[2025-09-05 14:54:53] [Rank 0] Group 11 Loss: 4.7395
+[2025-09-05 14:54:53] [Rank 0] Group 12 Loss: 4.6872
+[2025-09-05 14:54:53] [Rank 0] Group 13 Loss: 4.7269
+[2025-09-05 14:54:53] [Rank 0] Group 14 Loss: 4.7602
+[2025-09-05 14:54:53] [Rank 0] Group 15 Loss: 4.7940
+[2025-09-05 14:54:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:54:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:54:53] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:54:53] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:54:53] [Rank 0] Group 4 FTA: 0.8700
+[2025-09-05 14:54:53] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 14:54:53] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:54:53] [Rank 0] Group 7 FTA: 0.5100
+[2025-09-05 14:54:53] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 14:54:53] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 14:54:53] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 14:54:53] [Rank 0] Group 11 FTA: 0.3800
+[2025-09-05 14:54:53] [Rank 0] Group 12 FTA: 0.3800
+[2025-09-05 14:54:53] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 14:54:53] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 14:54:53] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 14:54:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:54:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:54:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:54:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:54:55] [Rank 0] step:8501/10000 train_time:366958ms step_avg:43.17ms
+[2025-09-05 14:54:56] [Rank 0] step:8521/10000 train_time:367634ms step_avg:43.14ms
+[2025-09-05 14:54:56] [Rank 0] step:8541/10000 train_time:368376ms step_avg:43.13ms
+[2025-09-05 14:54:57] [Rank 0] step:8561/10000 train_time:369118ms step_avg:43.12ms
+[2025-09-05 14:54:58] [Rank 0] step:8581/10000 train_time:369859ms step_avg:43.10ms
+[2025-09-05 14:54:59] [Rank 0] step:8601/10000 train_time:370738ms step_avg:43.10ms
+[2025-09-05 14:54:59] [Rank 0] step:8621/10000 train_time:371479ms step_avg:43.09ms
+[2025-09-05 14:55:00] [Rank 0] step:8641/10000 train_time:372220ms step_avg:43.08ms
+[2025-09-05 14:55:01] [Rank 0] step:8661/10000 train_time:372963ms step_avg:43.06ms
+[2025-09-05 14:55:02] [Rank 0] step:8681/10000 train_time:373828ms step_avg:43.06ms
+[2025-09-05 14:55:03] [Rank 0] step:8701/10000 train_time:374570ms step_avg:43.05ms
+[2025-09-05 14:55:03] [Rank 0] step:8721/10000 train_time:375312ms step_avg:43.04ms
+[2025-09-05 14:55:04] [Rank 0] step:8741/10000 train_time:376054ms step_avg:43.02ms
+[2025-09-05 14:55:05] [Rank 0] step:8761/10000 train_time:376796ms step_avg:43.01ms
+[2025-09-05 14:55:06] [Rank 0] step:8781/10000 train_time:377537ms step_avg:42.99ms
+[2025-09-05 14:55:06] [Rank 0] step:8801/10000 train_time:378279ms step_avg:42.98ms
+[2025-09-05 14:55:07] [Rank 0] step:8821/10000 train_time:379021ms step_avg:42.97ms
+[2025-09-05 14:55:08] [Rank 0] step:8841/10000 train_time:380377ms step_avg:43.02ms
+[2025-09-05 14:55:09] [Rank 0] step:8861/10000 train_time:381119ms step_avg:43.01ms
+[2025-09-05 14:55:10] [Rank 0] step:8881/10000 train_time:381861ms step_avg:43.00ms
+[2025-09-05 14:55:11] [Rank 0] step:8901/10000 train_time:382603ms step_avg:42.98ms
+[2025-09-05 14:55:11] [Rank 0] step:8921/10000 train_time:383344ms step_avg:42.97ms
+[2025-09-05 14:55:12] [Rank 0] step:8941/10000 train_time:384086ms step_avg:42.96ms
+[2025-09-05 14:55:13] [Rank 0] step:8961/10000 train_time:384827ms step_avg:42.94ms
+[2025-09-05 14:55:14] [Rank 0] step:8981/10000 train_time:385569ms step_avg:42.93ms
+[2025-09-05 14:55:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:55:15] [Rank 0] PRINT: step:9000/10000 train_loss:1.3953 val_loss:1.3850 train_time:386392ms step_avg:42.93ms
+[2025-09-05 14:55:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:55:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:56:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:56:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:56:35] [Rank 0] Total Loss: 4.1905
+[2025-09-05 14:56:35] [Rank 0] Total FTA (Unweighted): 0.5844
+[2025-09-05 14:56:35] [Rank 0] Total FTA (Weighted): 0.5844
+[2025-09-05 14:56:35] [Rank 0] Group 0 Loss: 3.5049
+[2025-09-05 14:56:35] [Rank 0] Group 1 Loss: 3.3022
+[2025-09-05 14:56:35] [Rank 0] Group 2 Loss: 3.2493
+[2025-09-05 14:56:35] [Rank 0] Group 3 Loss: 3.6319
+[2025-09-05 14:56:35] [Rank 0] Group 4 Loss: 3.7893
+[2025-09-05 14:56:35] [Rank 0] Group 5 Loss: 3.9574
+[2025-09-05 14:56:35] [Rank 0] Group 6 Loss: 3.9969
+[2025-09-05 14:56:35] [Rank 0] Group 7 Loss: 4.1431
+[2025-09-05 14:56:35] [Rank 0] Group 8 Loss: 4.4628
+[2025-09-05 14:56:35] [Rank 0] Group 9 Loss: 4.5588
+[2025-09-05 14:56:35] [Rank 0] Group 10 Loss: 4.7558
+[2025-09-05 14:56:35] [Rank 0] Group 11 Loss: 4.7039
+[2025-09-05 14:56:35] [Rank 0] Group 12 Loss: 4.7285
+[2025-09-05 14:56:35] [Rank 0] Group 13 Loss: 4.7440
+[2025-09-05 14:56:35] [Rank 0] Group 14 Loss: 4.7472
+[2025-09-05 14:56:35] [Rank 0] Group 15 Loss: 4.7727
+[2025-09-05 14:56:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:56:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:56:35] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:56:35] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:56:35] [Rank 0] Group 4 FTA: 0.8800
+[2025-09-05 14:56:35] [Rank 0] Group 5 FTA: 0.6000
+[2025-09-05 14:56:35] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:56:35] [Rank 0] Group 7 FTA: 0.5300
+[2025-09-05 14:56:35] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 14:56:35] [Rank 0] Group 9 FTA: 0.4400
+[2025-09-05 14:56:35] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 14:56:35] [Rank 0] Group 11 FTA: 0.3700
+[2025-09-05 14:56:35] [Rank 0] Group 12 FTA: 0.4400
+[2025-09-05 14:56:35] [Rank 0] Group 13 FTA: 0.2600
+[2025-09-05 14:56:35] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 14:56:35] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 14:56:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:56:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:56:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:56:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:56:37] [Rank 0] step:9001/10000 train_time:386401ms step_avg:42.93ms
+[2025-09-05 14:56:37] [Rank 0] step:9021/10000 train_time:387077ms step_avg:42.91ms
+[2025-09-05 14:56:38] [Rank 0] step:9041/10000 train_time:387819ms step_avg:42.90ms
+[2025-09-05 14:56:39] [Rank 0] step:9061/10000 train_time:388561ms step_avg:42.88ms
+[2025-09-05 14:56:40] [Rank 0] step:9081/10000 train_time:389304ms step_avg:42.87ms
+[2025-09-05 14:56:40] [Rank 0] step:9101/10000 train_time:390047ms step_avg:42.86ms
+[2025-09-05 14:56:41] [Rank 0] step:9121/10000 train_time:390789ms step_avg:42.84ms
+[2025-09-05 14:56:42] [Rank 0] step:9141/10000 train_time:391531ms step_avg:42.83ms
+[2025-09-05 14:56:43] [Rank 0] step:9161/10000 train_time:392273ms step_avg:42.82ms
+[2025-09-05 14:56:43] [Rank 0] step:9181/10000 train_time:393015ms step_avg:42.81ms
+[2025-09-05 14:56:44] [Rank 0] step:9201/10000 train_time:393758ms step_avg:42.80ms
+[2025-09-05 14:56:45] [Rank 0] step:9221/10000 train_time:394500ms step_avg:42.78ms
+[2025-09-05 14:56:46] [Rank 0] step:9241/10000 train_time:395242ms step_avg:42.77ms
+[2025-09-05 14:56:46] [Rank 0] step:9261/10000 train_time:395984ms step_avg:42.76ms
+[2025-09-05 14:56:47] [Rank 0] step:9281/10000 train_time:396728ms step_avg:42.75ms
+[2025-09-05 14:56:48] [Rank 0] step:9301/10000 train_time:397470ms step_avg:42.73ms
+[2025-09-05 14:56:49] [Rank 0] step:9321/10000 train_time:398211ms step_avg:42.72ms
+[2025-09-05 14:56:49] [Rank 0] step:9341/10000 train_time:398952ms step_avg:42.71ms
+[2025-09-05 14:56:50] [Rank 0] step:9361/10000 train_time:399694ms step_avg:42.70ms
+[2025-09-05 14:56:51] [Rank 0] step:9381/10000 train_time:400436ms step_avg:42.69ms
+[2025-09-05 14:56:51] [Rank 0] step:9401/10000 train_time:401178ms step_avg:42.67ms
+[2025-09-05 14:56:52] [Rank 0] step:9421/10000 train_time:401920ms step_avg:42.66ms
+[2025-09-05 14:56:53] [Rank 0] step:9441/10000 train_time:402662ms step_avg:42.65ms
+[2025-09-05 14:56:54] [Rank 0] step:9461/10000 train_time:403404ms step_avg:42.64ms
+[2025-09-05 14:56:54] [Rank 0] step:9481/10000 train_time:404146ms step_avg:42.63ms
+[2025-09-05 14:56:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:56:56] [Rank 0] PRINT: step:9500/10000 train_loss:1.3910 val_loss:1.3808 train_time:404968ms step_avg:42.63ms
+[2025-09-05 14:56:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:56:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:58:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:58:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:58:17] [Rank 0] Total Loss: 4.2677
+[2025-09-05 14:58:17] [Rank 0] Total FTA (Unweighted): 0.5894
+[2025-09-05 14:58:17] [Rank 0] Total FTA (Weighted): 0.5894
+[2025-09-05 14:58:17] [Rank 0] Group 0 Loss: 3.6352
+[2025-09-05 14:58:17] [Rank 0] Group 1 Loss: 3.4569
+[2025-09-05 14:58:17] [Rank 0] Group 2 Loss: 3.3142
+[2025-09-05 14:58:17] [Rank 0] Group 3 Loss: 3.7008
+[2025-09-05 14:58:17] [Rank 0] Group 4 Loss: 3.8947
+[2025-09-05 14:58:17] [Rank 0] Group 5 Loss: 4.0233
+[2025-09-05 14:58:17] [Rank 0] Group 6 Loss: 4.0219
+[2025-09-05 14:58:17] [Rank 0] Group 7 Loss: 4.2338
+[2025-09-05 14:58:17] [Rank 0] Group 8 Loss: 4.5120
+[2025-09-05 14:58:17] [Rank 0] Group 9 Loss: 4.6269
+[2025-09-05 14:58:17] [Rank 0] Group 10 Loss: 4.8035
+[2025-09-05 14:58:17] [Rank 0] Group 11 Loss: 4.8051
+[2025-09-05 14:58:17] [Rank 0] Group 12 Loss: 4.7862
+[2025-09-05 14:58:17] [Rank 0] Group 13 Loss: 4.7846
+[2025-09-05 14:58:17] [Rank 0] Group 14 Loss: 4.8307
+[2025-09-05 14:58:17] [Rank 0] Group 15 Loss: 4.8535
+[2025-09-05 14:58:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:58:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:58:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:58:17] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:58:17] [Rank 0] Group 4 FTA: 0.9000
+[2025-09-05 14:58:17] [Rank 0] Group 5 FTA: 0.6000
+[2025-09-05 14:58:17] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:58:17] [Rank 0] Group 7 FTA: 0.5100
+[2025-09-05 14:58:17] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 14:58:17] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 14:58:17] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 14:58:17] [Rank 0] Group 11 FTA: 0.4100
+[2025-09-05 14:58:17] [Rank 0] Group 12 FTA: 0.4600
+[2025-09-05 14:58:17] [Rank 0] Group 13 FTA: 0.3100
+[2025-09-05 14:58:17] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 14:58:17] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 14:58:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 14:58:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 14:58:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 14:58:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 14:58:18] [Rank 0] step:9501/10000 train_time:404978ms step_avg:42.62ms
+[2025-09-05 14:58:19] [Rank 0] step:9521/10000 train_time:405656ms step_avg:42.61ms
+[2025-09-05 14:58:20] [Rank 0] step:9541/10000 train_time:406398ms step_avg:42.59ms
+[2025-09-05 14:58:21] [Rank 0] step:9561/10000 train_time:407139ms step_avg:42.58ms
+[2025-09-05 14:58:21] [Rank 0] step:9581/10000 train_time:407881ms step_avg:42.57ms
+[2025-09-05 14:58:22] [Rank 0] step:9601/10000 train_time:408622ms step_avg:42.56ms
+[2025-09-05 14:58:23] [Rank 0] step:9621/10000 train_time:409365ms step_avg:42.55ms
+[2025-09-05 14:58:24] [Rank 0] step:9641/10000 train_time:410106ms step_avg:42.54ms
+[2025-09-05 14:58:25] [Rank 0] step:9661/10000 train_time:410921ms step_avg:42.53ms
+[2025-09-05 14:58:25] [Rank 0] step:9681/10000 train_time:411663ms step_avg:42.52ms
+[2025-09-05 14:58:26] [Rank 0] step:9701/10000 train_time:412405ms step_avg:42.51ms
+[2025-09-05 14:58:27] [Rank 0] step:9721/10000 train_time:413147ms step_avg:42.50ms
+[2025-09-05 14:58:27] [Rank 0] step:9741/10000 train_time:413888ms step_avg:42.49ms
+[2025-09-05 14:58:28] [Rank 0] step:9761/10000 train_time:414633ms step_avg:42.48ms
+[2025-09-05 14:58:29] [Rank 0] step:9781/10000 train_time:415375ms step_avg:42.47ms
+[2025-09-05 14:58:30] [Rank 0] step:9801/10000 train_time:416116ms step_avg:42.46ms
+[2025-09-05 14:58:30] [Rank 0] step:9821/10000 train_time:416858ms step_avg:42.45ms
+[2025-09-05 14:58:31] [Rank 0] step:9841/10000 train_time:417599ms step_avg:42.43ms
+[2025-09-05 14:58:32] [Rank 0] step:9861/10000 train_time:418341ms step_avg:42.42ms
+[2025-09-05 14:58:33] [Rank 0] step:9881/10000 train_time:419083ms step_avg:42.41ms
+[2025-09-05 14:58:33] [Rank 0] step:9901/10000 train_time:419825ms step_avg:42.40ms
+[2025-09-05 14:58:34] [Rank 0] step:9921/10000 train_time:420567ms step_avg:42.39ms
+[2025-09-05 14:58:35] [Rank 0] step:9941/10000 train_time:421309ms step_avg:42.38ms
+[2025-09-05 14:58:36] [Rank 0] step:9961/10000 train_time:422050ms step_avg:42.37ms
+[2025-09-05 14:58:36] [Rank 0] step:9981/10000 train_time:422792ms step_avg:42.36ms
+[2025-09-05 14:58:37] [Rank 0] step:10000/10000 train_time:423497ms step_avg:42.35ms
+[2025-09-05 14:58:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 14:58:38] [Rank 0] PRINT: step:10000/10000 train_loss:1.3864 val_loss:1.3759 train_time:423622ms step_avg:42.36ms
+[2025-09-05 14:58:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 14:58:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 14:59:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 14:59:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 14:59:59] [Rank 0] Total Loss: 4.2409
+[2025-09-05 14:59:59] [Rank 0] Total FTA (Unweighted): 0.5944
+[2025-09-05 14:59:59] [Rank 0] Total FTA (Weighted): 0.5944
+[2025-09-05 14:59:59] [Rank 0] Group 0 Loss: 3.5966
+[2025-09-05 14:59:59] [Rank 0] Group 1 Loss: 3.4160
+[2025-09-05 14:59:59] [Rank 0] Group 2 Loss: 3.3097
+[2025-09-05 14:59:59] [Rank 0] Group 3 Loss: 3.6849
+[2025-09-05 14:59:59] [Rank 0] Group 4 Loss: 3.8622
+[2025-09-05 14:59:59] [Rank 0] Group 5 Loss: 4.0054
+[2025-09-05 14:59:59] [Rank 0] Group 6 Loss: 4.0308
+[2025-09-05 14:59:59] [Rank 0] Group 7 Loss: 4.1901
+[2025-09-05 14:59:59] [Rank 0] Group 8 Loss: 4.5088
+[2025-09-05 14:59:59] [Rank 0] Group 9 Loss: 4.6165
+[2025-09-05 14:59:59] [Rank 0] Group 10 Loss: 4.7818
+[2025-09-05 14:59:59] [Rank 0] Group 11 Loss: 4.7628
+[2025-09-05 14:59:59] [Rank 0] Group 12 Loss: 4.7541
+[2025-09-05 14:59:59] [Rank 0] Group 13 Loss: 4.7643
+[2025-09-05 14:59:59] [Rank 0] Group 14 Loss: 4.7735
+[2025-09-05 14:59:59] [Rank 0] Group 15 Loss: 4.7967
+[2025-09-05 14:59:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 14:59:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 14:59:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 14:59:59] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 14:59:59] [Rank 0] Group 4 FTA: 0.9400
+[2025-09-05 14:59:59] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 14:59:59] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 14:59:59] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 14:59:59] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 14:59:59] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 14:59:59] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 14:59:59] [Rank 0] Group 11 FTA: 0.4100
+[2025-09-05 14:59:59] [Rank 0] Group 12 FTA: 0.4900
+[2025-09-05 14:59:59] [Rank 0] Group 13 FTA: 0.3300
+[2025-09-05 14:59:59] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 14:59:59] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 14:59:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_loss_curves.png
+[2025-09-05 15:00:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/per_class_acc_curves.png
+[2025-09-05 15:00:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_loss_curve.png
+[2025-09-05 15:00:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/total_acc_curve.png
+[2025-09-05 15:00:00] [Rank 0] step:10001/10000 train_time:423632ms step_avg:42.36ms
+[2025-09-05 15:00:00] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 15:00:00 2025 ---
+[2025-09-05 15:00:00] [Rank 0] PRINT: Peak memory allocated: 3780 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/training_log_ce79dadc-b802-45da-bc5f-ede1bd85df46.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/training_log_ce79dadc-b802-45da-bc5f-ede1bd85df46.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aff580ec1a864dabbca928a87561220a0d1dda79
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42/training_log_ce79dadc-b802-45da-bc5f-ede1bd85df46.txt
@@ -0,0 +1,2756 @@
+[2025-09-05 14:14:30] [Rank 0] PRINT: --- Script Start: Fri Sep 5 14:14:30 2025 ---
+[2025-09-05 14:14:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 14:14:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 14:14:30] [Rank 0] PRINT: Using fixed seed: 42
+[2025-09-05 14:14:30] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_42
+[2025-09-05 14:14:30] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
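+
+# Shard format, as enforced by the asserts in _load_data_shard above: a
+# 256-int32 header whose first three entries are a magic number (20240520),
+# a format version (1), and the token count, followed by the tokens as
+# uint16 values -- hence the seek past 256 * 4 header bytes and the
+# nbytes == 2 * num_tokens check. In the generator, each rank reads its own
+# (local_batch_size + 1)-token window (one extra token so that targets are
+# inputs shifted by one) at an offset of rank * local_batch_size from the
+# shared position cursor.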
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam). "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP). "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn). "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP). "
+                         "9: SGD variant (uses --sgd_lr)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 491520
+    train_seq_len = 3*1024
+    val_seq_len = 4*4*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
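+
+# A launch command consistent with the CLI args recorded at the top of this
+# log would look like the following (the script filename and launcher are
+# illustrative assumptions, not taken from the log):
+#   torchrun --nproc_per_node=1 <train_script>.py \
+#       --optimizer_mode 9 --model_parameterization gated --sgd_lr 0.5 \
+#       --seed 42 --m_val 15 --per_group_k 100 \
+#       --base_dir logs_qa_sgd_gated/lr_search_long \
+#       --qa_jsonl_path /home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl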
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message exactly once; an unconditional second write here
+        # would duplicate every line in the log file.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
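+
+# Worked example: for m = 15 this yields 16 groups (0..15) and
+# 1 + sum_{g=1..15} 2**(g-1) = 32768 classes in total. Group 0 has a single
+# class with 2**15 samples; each group g >= 1 has 2**(g-1) classes with
+# 2**(15-g) samples per class (e.g. group 15: 16384 classes, 1 sample each).
+# These group ids are the "Group 0..15" buckets reported in the evaluation
+# logs above.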
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            # Pad to a multiple of the 128-token attention block size, capped at 4096
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            # Targets are the inputs shifted by one; padding positions are masked with -100
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA (first-token accuracy) ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (a single forward pass serves both metrics) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if 0 < prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
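+    # FTA, illustrated (a sketch with made-up values): for a line such as
+    # "What is the capital of France? Answer: Paris", the regex above yields
+    #     prompt = "What is the capital of France?"    answer = "Paris"
+    # expected_token = tokenizer.encode(" Paris")[0], and the prediction is the
+    # argmax of the logits at index len(prompt_tokens) - 1, i.e. the model's
+    # guess for the first token following the prompt.
+
+    # 4. 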
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # All Adam, no Muon
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP; Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP; Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum on all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on QK, V Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2; Adam on QK, V Attn, W_1
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup: head, embeddings and scalar params always go to Adam
+        adam_param_groups_config = [
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # weight_decay (e.g. 0.01) could be added here
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
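+        # Mode -> matrix assignment, for reference (derived from the branches
+        # above; embeddings, lm_head and scalars always go to Adam; QK/V/O are
+        # the attention matrices, W_1/W_2 the MLP matrices):
+        #    0: Muon{QK,V,O,MLP}               9: SGD+momentum on everything
+        #    1: Muon{QK},      Adam{V,O,MLP}  10: Muon{O,MLP},  Adam{QK,V}
+        #    2: Muon{V,O},     Adam{QK,MLP}   13: Muon{O,W_2},  Adam{QK,V,W_1}
+        #    3: Muon{QK,V,O},  Adam{MLP}      14: Muon{O},      Adam{QK,V,MLP}
+        #    4: Muon{MLP},     Adam{QK,V,O}   15: Muon{V},      Adam{QK,O,MLP}
+        #    5: Adam on all matrices          16: Muon{QK,V},   Adam{O,MLP}
+        #    6: Muon{W_2},     Adam{Attn,W_1}
+        #    7: Muon{V,O,MLP}, Adam{QK}
+        #    8: Muon{V,O,W_2}, Adam{QK,W_1}
+        print0(f"PRINT: Optimizers configured. 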
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # All Adam, no Muon
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP; Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP; Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum on all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on QK, V Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2; Adam on QK, V Attn, W_1
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup: head, embeddings and scalar params always go to Adam
+        adam_param_groups_config = [
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # weight_decay (e.g. 0.01) could be added here
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # Read the CLI value directly; unlike the qkvo branch, this branch does not define a local muon_lr
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
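+        # Sanity-check sketch (assumes the flat lists built above): for every
+        # mode, Muon's and Adam's matrix sets should be disjoint and together
+        # cover all attention and MLP matrices:
+        #
+        #     muon_ids = {id(p) for p in flat_unique_muon_params} if muon_params_target_list else set()
+        #     adam_ids = {id(p) for p in flat_adam_matrices} if adam_matrix_target_list else set()
+        #     all_ids = {id(p) for p in all_attn_matrices + all_mlp_matrices}
+        #     assert muon_ids.isdisjoint(adam_ids) and (muon_ids | adam_ids) == all_ids
+        #
+        # (Mode 0 leaves the Adam matrix list empty and mode 5 leaves the Muon
+        # list empty; the assertion covers both directions.)
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)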
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# Learning-rate schedule: constant for the first (1 - cooldown_frac) of training,
+# then a linear cooldown to 0.1x over the final cooldown_frac.
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # Clamp rather than assert, so the final step (x == 1) is valid
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Guard against division by zero if cooldown_frac is 0
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# Attention window size schedule: grows linearly with training progress up to
+# 1728 tokens, rounded to multiples of the 128-token block size.
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)
+    # Ensure the window is at least one 128-token block
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+# Snapshot model/optimizer state so the warmup steps can be rolled back afterwards
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
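+    # Gradients are averaged across ranks by hand (all-reduce with AVG) rather
+    # than by wrapping the model in DistributedDataParallel; for a single
+    # tensor this amounts to:
+    #
+    #     dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)   # grad -> mean over ranks
+    #
+    # which matches the gradient averaging DDP would perform.
+    for param in 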
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 14:14:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP, Adam on QK Attn, V Attn, W_1 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn, Adam on O Attn and MLP
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
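+# Editorial note (illustrative, added for readability): the elif chain above, which is
+# repeated verbatim for the "gated" parameterization below, implements this mode table
+# (embeddings, lm_head and scalar params always go to Adam, except in mode 9):
+#   mode  0: Muon = QK + VO + MLP        Adam matrices = (none)
+#   mode  1: Muon = QK                   Adam matrices = VO + MLP
+#   mode  2: Muon = VO                   Adam matrices = QK + MLP
+#   mode  3: Muon = QKVO                 Adam matrices = MLP
+#   mode  4: Muon = MLP                  Adam matrices = QKVO
+#   mode  5: Muon = (none)               Adam matrices = QKVO + MLP
+#   mode  6: Muon = W2                   Adam matrices = QKVO + W1
+#   mode  7: Muon = VO + MLP             Adam matrices = QK
+#   mode  8: Muon = VO + W2              Adam matrices = QK + W1
+#   mode  9: plain SGD + momentum on all parameters (no Adam, no Muon)
+#   mode 10: Muon = O + MLP              Adam matrices = V + QK
+#   mode 13: Muon = O + W2               Adam matrices = QK + V + W1
+#   mode 14: Muon = O                    Adam matrices = QK + V + MLP
+#   mode 15: Muon = V                    Adam matrices = QK + O + MLP
+#   mode 16: Muon = QKV                  Adam matrices = O + MLP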
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr # LR for Muon-managed matrices (referenced by the Muon setup below)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
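+    # Illustrative sanity check (editorial addition, not from the original run): the
+    # gated W1 group (c_fc + c_up) and the W2 group (c_proj) should partition the set
+    # of all MLP matrices; a cheap guard against grouping mistakes when modes split W1 from W2.
+    _w1_ids = {id(p) for p in mlp_w1_group}
+    _w2_ids = {id(p) for p in mlp_w2_group}
+    assert _w1_ids.isdisjoint(_w2_ids)
+    assert _w1_ids | _w2_ids == {id(p) for p in all_mlp_matrices}
+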
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP, Adam on QK Attn, V Attn, W_1 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn, Adam on O Attn and MLP
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 14:14:30] [Rank 0] PRINT: Constructing model...
+[2025-09-05 14:14:32] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 14:14:32] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 14:14:32] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 14:14:33] [Rank 0] PRINT: Model test failed: 
+[2025-09-05 14:14:33] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 14:14:33] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 14:14:33] [Rank 0] PRINT: Model test still fails: 
+[2025-09-05 14:14:33] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 14:14:33] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 14:14:33] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 14:14:38] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 14:14:38] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 14:14:51] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 14:14:51] [Rank 0] PRINT: Starting warmup...
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..197d149299d684c602a7c4f7cee6923be84f1d52
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/config.json
@@ -0,0 +1,29 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 43,
+        "optimizer_mode": 9,
+        "model_parameterization": "gated",
+        "per_group_k": 100,
+        "muon_lr": 0.01,
+        "adam_lr": 0.001,
+        "base_dir": "logs_qa_sgd_gated/lr_search_long",
+        "sgd_lr": 0.5,
+        "m_val": 15,
+        "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl"
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin",
+        "val_tokens": 491520,
+        "train_seq_len": 3072,
+        "val_seq_len": 16384,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "25fefcfc-501a-4895-9dd0-a46840a7567c",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/fixed_eval_indices.json
new file mode 100644
index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/fixed_eval_indices.json
@@ -0,0 +1 @@
+{"1": [1238956, 182074, 1437575, 1061037, 383150, 
1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 
969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 
1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 
1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..9421115ca9f9b6e7accab6525fb30a4b989ec47e --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cccdb5bf4013cd7d96294def16edf748036c6624b8f389380814da67d0f9e6b0 +size 395351 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png new file mode 
100644 index 0000000000000000000000000000000000000000..3a8cb380bae122b3fbb2e9f4f131f614e986b5bd --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cedd9eb9b184b93bb12023fff8dc22f3b1156599fd13e58a17433c1493a2540 +size 494232 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..5acc24d8da212296251b8501c414888741b08579 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b36de6b2a04e2ab133a6d71d34eecf845e0805dfd6eca000caee1cee99977eac +size 93157 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..9e58cce7d718152824e384bbeda70618503690da --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74f0daa99c640fa48b54347355ab8173c2c0cefcdaa528c2849728304df3c067 +size 122792 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/training_log_25fefcfc-501a-4895-9dd0-a46840a7567c.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/training_log_25fefcfc-501a-4895-9dd0-a46840a7567c.txt new file mode 100644 index 0000000000000000000000000000000000000000..d18f48062330d8d59fb12f88e7995cd6343fc7b6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/training_log_25fefcfc-501a-4895-9dd0-a46840a7567c.txt @@ -0,0 +1,5614 @@ +[2025-09-05 15:00:25] [Rank 0] PRINT: --- Script Start: Fri Sep 5 15:00:25 2025 --- +[2025-09-05 15:00:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 15:00:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 15:00:25] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-05 15:00:25] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43 +[2025-09-05 15:00:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math
+from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
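+ # Each rank reads a disjoint local_batch_size slice of the shared shard, and pos advances + # by the global batch_size below, so the ranks stay aligned across steps. inputs are int32 + # (sufficient for the vocab / embedding lookup) while targets are int64, as F.cross_entropy expects.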
+ pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn); " + "5: All Adam (No Muon, all applicable matrices to Adam); " + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); " + "7: Muon(VO Attn, MLP)/Adam(QK Attn); " + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); " + "9: pure SGD+Momentum on all parameters; " + "10: Muon(O Attn, MLP)/Adam(QK Attn, V Attn); " + "13: Muon(O Attn, W_2 MLP)/Adam(QK Attn, V Attn, W_1 MLP); " + "14: Muon(O Attn)/Adam(QK Attn, V Attn, MLP); " + "15: Muon(V Attn)/Adam(QK Attn, O Attn, MLP); " + "16: Muon(QKV Attn)/Adam(O Attn, MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence
length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, 
console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the timestamped message to the run's log file + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + +# Worked example: m=3 yields groups 0..3 with 1, 1, 2, 4 classes at 8, 4, 2, 1 samples per class, +# i.e. selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1} and +# class_groups = [0, 1, 2, 2, 3, 3, 3, 3]: per-class counts halve as the group id grows. + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + +
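+ # The fixed_indices map (group_id -> line indices into the QA jsonl) freezes the eval subset, + # so per-class curves are comparable across steps and runs instead of jittering with a fresh + # random sample at each evaluation. FTA below = first-token accuracy: the argmax prediction at + # the last prompt position must match the first token of " <answer>". + + # 2.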
Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
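+ # Notation recap for this mode map: QK = query/key projections, V/O = value/output projections + # (VO = both), W_1 = MLP input matrix (c_fc), W_2 = MLP output matrix (c_proj). Whatever Muon + # does not take is handed to Adam; embeddings, the LM head and scalar parameters always stay + # with Adam regardless of mode.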
+ elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
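+ # Same matrix notation as the qkvo branch above; note that for the gated parameterization + # the W_1 group also includes the c_up matrices (mlp_w1_group = mlp_fc_params + mlp_up_params).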
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
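For reference, the if/elif chain above is equivalent to a lookup table from optimizer_mode to a (Muon targets, Adam matrix targets) pair. A minimal sketch of that routing, with empty placeholder lists standing in for the parameter groups collected above (mode 9, pure SGD, is handled outside the table). Note two label slips in the logged prints: mode 13 announces itself as "Mode 32", and mode 16 announces itself as "Mode 15" and wrongly lists QK among Adam's targets.

# Placeholders for attn_qk_group, attn_vo_group, attn_o_params,
# attn_v_params, mlp_w1_group, mlp_w2_group collected above.
qk, vo, o, v, w1, w2 = [], [], [], [], [], []

MODE_TABLE = {
    0:  (qk + vo + w1 + w2, []),                 # Muon on all hidden matrices
    1:  (qk,                vo + w1 + w2),
    2:  (vo,                qk + w1 + w2),
    3:  (qk + vo,           w1 + w2),
    4:  (w1 + w2,           qk + vo),
    5:  ([],                qk + vo + w1 + w2),  # all matrices to Adam
    6:  (w2,                qk + vo + w1),
    7:  (vo + w1 + w2,      qk),
    8:  (vo + w2,           qk + w1),
    10: (o + w1 + w2,       v + qk),
    13: (o + w2,            qk + v + w1),        # printed as "Mode 32" above
    14: (o,                 qk + v + w1 + w2),
    15: (v,                 qk + o + w1 + w2),
    16: (v + qk,            o + w1 + w2),        # printed as "Mode 15" above
}
muon_targets, adam_matrix_targets = MODE_TABLE[5]  # e.g. mode 5: Muon gets nothing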
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
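Taken together, get_lr and get_window_size_blocks above implement a constant-then-linear-decay LR multiplier and a linearly growing attention window. A self-contained sketch of the same formulas with this run's settings (num_iterations=10000, cooldown_frac=0.8):

import math

def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)    # clamped training progress
    if x < 1 - cooldown_frac:
        return 1.0                                   # stable phase
    w = (1 - x) / max(cooldown_frac, 1e-9)           # 1 -> 0 across the cooldown
    return w * 1.0 + (1 - w) * 0.1                   # linear decay to 0.1x

def window_tokens(step, num_iterations=10000):
    x = min(max(step / num_iterations, 0.0), 1.0)
    return max(128, math.ceil(1728 * x / 128) * 128) # next multiple of 128

assert lr_multiplier(0) == 1.0                       # flat for the first 20%
assert abs(lr_multiplier(6000) - 0.55) < 1e-9        # halfway down the ramp
assert abs(lr_multiplier(10000) - 0.1) < 1e-9        # floor of the decay
assert window_tokens(0) == 128 and window_tokens(10000) == 1792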
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
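build_fixed_eval_indices above freezes the evaluation set: for each group it keeps at most PER_GROUP_K line offsets into the QA jsonl (sampling with a fixed seed when a group is larger), so every subsequent eval scores identical samples. A toy sketch of the resulting layout, with made-up data:

import json, random
from collections import defaultdict

lines = [{"class_id": i % 3, "text": f"Q{i}? Answer: A{i}"} for i in range(12)]
class_to_group = {0: 0, 1: 1, 2: 1}         # class_id -> group_id
per_group_k, rng = 2, random.Random(2025)

buckets = defaultdict(list)                  # group_id -> [jsonl line index, ...]
for idx, item in enumerate(lines):
    gid = class_to_group.get(item["class_id"])
    if gid is not None:                      # (the real code also checks parseability)
        buckets[gid].append(idx)

fixed = {str(g): arr[:] if len(arr) <= per_group_k else rng.sample(arr, per_group_k)
         for g, arr in buckets.items()}
print(json.dumps(fixed))                     # {"0": [...], "1": [...]}, <= 2 indices each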
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
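The resulting val_loss_avg is a mean over whatever val steps actually completed on this rank; the all_reduce just below then averages those per-rank means, so a rank whose loader ran dry early contributes a mean over fewer batches. Equivalent single-process arithmetic for ReduceOp.AVG, with made-up numbers:

import torch

local_means = [torch.tensor(2.10), torch.tensor(2.30)]   # hypothetical per-rank means
global_mean = torch.stack(local_means).mean()            # what every rank holds after
print(round(global_mean.item(), 2))                      # all_reduce(AVG): 2.2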
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 15:00:25] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
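One detail of the training step above worth calling out: Muon's momentum is warmed up over the first 300 steps, interpolating linearly from 0.85 to 0.95 and holding there. Sketch:

def muon_momentum(step: int) -> float:
    frac = min(step / 300, 1.0)
    return (1 - frac) * 0.85 + frac * 0.95

assert muon_momentum(0) == 0.85
assert abs(muon_momentum(150) - 0.90) < 1e-12   # midpoint of the ramp
assert muon_momentum(300) == muon_momentum(10_000) == 0.95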
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
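_load_data_shard above fixes the shard format: a 256-slot int32 header whose first three entries are a magic number (20240520), a version (1), and the token count, followed by the tokens themselves as uint16. A sketch of writing a compatible shard with numpy (toy filename and data):

import numpy as np

def write_shard(path: str, tokens: np.ndarray) -> None:
    header = np.zeros(256, dtype=np.int32)   # 256 int32 slots = 1024 bytes
    header[0] = 20240520                     # magic checked by _load_data_shard
    header[1] = 1                            # version
    header[2] = len(tokens)                  # number of uint16 tokens to follow
    with open(path, "wb") as f:
        f.write(header.tobytes())
        f.write(tokens.astype(np.uint16).tobytes())

write_shard("toy_train_000.bin", np.arange(4096) % 50257)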
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message 
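A side note on the config dump above: Hyperparameters declares plain class attributes without type annotations, so despite the @dataclass decorator it has no dataclass fields and dataclasses.asdict() would return an empty dict; scanning __class__.__dict__ is what actually recovers the values. Minimal demonstration:

from dataclasses import dataclass, asdict

@dataclass
class Hyperparameters:
    val_tokens = 491520        # no annotation -> class attribute, not a field
    num_iterations = 10000

args = Hyperparameters()
assert asdict(args) == {}      # no dataclass fields at all
config = {k: v for k, v in args.__class__.__dict__.items()
          if not k.startswith("__") and not callable(v)}
assert config == {"val_tokens": 491520, "num_iterations": 10000}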
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
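First-token accuracy (FTA) in the loop above boils down to three moves: split the text at "Answer:", read the logits at the last prompt position, and compare the argmax against the first token of " <answer>". A toy sketch with a stand-in tokenizer and random logits (hypothetical names):

import re
import torch

text = "What is the capital of France? Answer: Paris"
match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE)
prompt, answer = (s.strip() for s in match.groups())

encode = lambda s: [ord(c) % 256 for c in s]           # toy "tokenizer"
logits = torch.randn(len(encode(text)) + 8, 256)       # (padded seq_len, vocab)

expected_token = encode(" " + answer)[0]               # first answer token id
prompt_len = len(encode(prompt))
predicted = logits[prompt_len - 1].argmax().item()     # prediction after the prompt
hit = int(predicted == expected_token)                 # counts toward group FTA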
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
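The two accuracy totals computed above diverge whenever group sizes are skewed, which is exactly the power-law setting here: the weighted figure pools all samples (head groups dominate), while the unweighted figure averages the per-group rates. A numeric illustration with made-up counts:

correct = {"0": 90, "1": 1}     # head group vs. tail group
total   = {"0": 100, "1": 10}

per_group  = {g: correct[g] / total[g] for g in total}   # {'0': 0.9, '1': 0.1}
weighted   = sum(correct.values()) / sum(total.values()) # 91/110 ~= 0.83
unweighted = sum(per_group.values()) / len(per_group)    # (0.9 + 0.1)/2 = 0.5
# the script stores the unweighted mean in history['total_acc']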
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
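Before Muon is constructed above, the target list is flattened and de-duplicated by id(), which protects against a Parameter landing in two logical groups and being registered with the optimizer twice. Self-contained sketch of that guard:

import torch

w = torch.nn.Parameter(torch.zeros(4, 4))
candidates = [w, w, None, torch.nn.Parameter(torch.ones(2, 2))]

flat, seen = [], set()
for p in candidates:
    if p is not None and id(p) not in seen:
        flat.append(p)
        seen.add(id(p))
assert len(flat) == 2          # duplicate and None entries dropped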
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
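+    # Quick reference for the dispatch above (summary derived from the branches;
+    # outside mode 9, Adam also covers the head, embeddings, and scalar parameters):
+    #   mode 0: Muon on QKVO+MLP     mode 6: Muon on W_2 MLP      mode 13: Muon on W_O, W_2
+    #   mode 1: Muon on QK           mode 7: Muon on VO+MLP       mode 14: Muon on W_O
+    #   mode 2: Muon on VO           mode 8: Muon on VO+W_2       mode 15: Muon on W_V
+    #   mode 3: Muon on QKVO         mode 9: pure SGD+momentum    mode 16: Muon on QKV
+    #   mode 4: Muon on MLP          mode 10: Muon on O+MLP
+    #   mode 5: all matrices on Adam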
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
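+# Illustrative values for the two schedules above (a hand-computed sketch; assumes
+# this run's num_iterations=10000 and cooldown_frac=0.8, not values checked at runtime):
+#   get_lr(0)     -> 1.0   (multiplier stays at 1.0 while x < 1 - cooldown_frac = 0.2)
+#   get_lr(6000)  -> 0.55  (x=0.6 gives w=0.5, so 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) -> 0.1   (end of the linear cooldown)
+#   get_window_size_blocks(5000): 1728*0.5 = 864, rounded up to 896 tokens = 7 blocks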
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filter for building the fixed eval set: ensure the text parses as "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets of line indices by group_id, collecting only samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
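+    # The returned mapping is group_id -> sampled JSONL line indices, serialized to
+    # fixed_eval_indices.json as e.g. {"0": [...], "1": [...], ...}. With 16 groups and
+    # per_group_k=100 this yields the fixed pool of 1600 eval samples reported in the log.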
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Muon momentum warmup: ramps linearly from 0.85 to 0.95 over the first 300 steps
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 15:00:25] [Rank 0] PRINT: Constructing model...
+[2025-09-05 15:00:26] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 15:00:26] [Rank 0] PRINT: Model constructed and broadcasted. 
+[2025-09-05 15:00:26] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 15:00:30] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 15:00:30] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 15:00:30] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 15:00:30] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 15:00:31] [Rank 0] PRINT: Model returns:
+[2025-09-05 15:00:31] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 15:00:31] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 15:00:31] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 15:00:31] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 15:00:31] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 15:00:35] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 15:00:35] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 15:01:34] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 15:01:35] [Rank 0] PRINT: Starting training...
+[2025-09-05 15:01:41] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/fixed_eval_indices.json
+[2025-09-05 15:01:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:01:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 15:02:34] [Rank 0] step:21/10000 train_time:48060ms step_avg:2288.59ms
+[2025-09-05 15:02:34] [Rank 0] step:41/10000 train_time:48789ms step_avg:1189.97ms
+[2025-09-05 15:02:35] [Rank 0] step:61/10000 train_time:49619ms step_avg:813.42ms
+[2025-09-05 15:02:36] [Rank 0] step:81/10000 train_time:50347ms step_avg:621.56ms
+[2025-09-05 15:02:37] [Rank 0] step:101/10000 train_time:51074ms step_avg:505.68ms
+[2025-09-05 15:02:37] [Rank 0] step:121/10000 train_time:51801ms step_avg:428.10ms
+[2025-09-05 15:02:38] [Rank 0] step:141/10000 train_time:52527ms step_avg:372.54ms
+[2025-09-05 15:02:39] [Rank 0] step:161/10000 train_time:53254ms step_avg:330.77ms
+[2025-09-05 15:02:39] [Rank 0] step:181/10000 train_time:53981ms step_avg:298.24ms
+[2025-09-05 15:02:40] [Rank 0] step:201/10000 train_time:54707ms step_avg:272.18ms
+[2025-09-05 15:02:41] [Rank 0] step:221/10000 train_time:55434ms step_avg:250.83ms
+[2025-09-05 15:02:42] [Rank 0] step:241/10000 train_time:56161ms step_avg:233.03ms
+[2025-09-05 15:02:42] [Rank 0] step:261/10000 train_time:56888ms step_avg:217.96ms
+[2025-09-05 15:02:43] [Rank 0] step:281/10000 train_time:57614ms step_avg:205.03ms
+[2025-09-05 15:02:44] [Rank 0] step:301/10000 train_time:58342ms step_avg:193.83ms
+[2025-09-05 15:02:45] [Rank 0] step:321/10000 train_time:59069ms step_avg:184.01ms
+[2025-09-05 15:02:45] [Rank 0] step:341/10000 train_time:59795ms step_avg:175.35ms
+[2025-09-05 15:02:46] [Rank 0] step:361/10000 train_time:60522ms step_avg:167.65ms
+[2025-09-05 15:02:47] [Rank 0] step:381/10000 train_time:61249ms step_avg:160.76ms
+[2025-09-05 15:02:47] [Rank 0] step:401/10000 train_time:61975ms step_avg:154.55ms
+[2025-09-05 15:02:48] [Rank 0] step:421/10000 train_time:62702ms step_avg:148.94ms
+[2025-09-05 15:02:49] [Rank 0] step:441/10000 train_time:63429ms step_avg:143.83ms
+[2025-09-05 15:02:50] [Rank 0] step:461/10000 train_time:64156ms step_avg:139.17ms
+[2025-09-05 15:02:50] [Rank 0] step:481/10000 train_time:64883ms step_avg:134.89ms
+[2025-09-05 15:02:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:02:52] [Rank 0] PRINT: step:500/10000 train_loss:3.5544 val_loss:2.3264 train_time:65690ms step_avg:131.38ms
+[2025-09-05 15:02:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:02:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:04:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:04:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:04:13] [Rank 0] Total Loss: 4.6990
+[2025-09-05 15:04:13] [Rank 0] Total FTA (Unweighted): 0.2400
+[2025-09-05 15:04:13] [Rank 0] Total FTA (Weighted): 0.2400
+[2025-09-05 15:04:13] [Rank 0] Group 0 Loss: 3.2932
+[2025-09-05 15:04:13] [Rank 0] Group 1 Loss: 3.1320
+[2025-09-05 15:04:13] [Rank 0] Group 2 Loss: 3.1567
+[2025-09-05 15:04:13] [Rank 0] Group 3 Loss: 3.6114
+[2025-09-05 15:04:13] [Rank 0] Group 4 Loss: 3.9484
+[2025-09-05 15:04:13] [Rank 0] Group 5 Loss: 4.4289
+[2025-09-05 15:04:13] [Rank 0] Group 6 Loss: 4.7963
+[2025-09-05 15:04:13] [Rank 0] Group 7 Loss: 4.9638
+[2025-09-05 15:04:13] [Rank 0] Group 8 Loss: 5.3219
+[2025-09-05 15:04:13] [Rank 0] Group 9 Loss: 5.4206
+[2025-09-05 15:04:13] [Rank 0] Group 10 Loss: 5.5344
+[2025-09-05 15:04:13] [Rank 0] Group 11 Loss: 5.5935
+[2025-09-05 15:04:13] [Rank 0] Group 12 Loss: 5.4844 
+[2025-09-05 15:04:13] [Rank 0] Group 13 Loss: 5.5084 +[2025-09-05 15:04:13] [Rank 0] Group 13 Loss: 5.5084 +[2025-09-05 15:04:13] [Rank 0] Group 14 Loss: 5.5199 +[2025-09-05 15:04:13] [Rank 0] Group 14 Loss: 5.5199 +[2025-09-05 15:04:13] [Rank 0] Group 15 Loss: 5.4703 +[2025-09-05 15:04:13] [Rank 0] Group 15 Loss: 5.4703 +[2025-09-05 15:04:13] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:04:13] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:04:13] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:04:13] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:04:13] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 15:04:13] [Rank 0] Group 2 FTA: 0.1800 +[2025-09-05 15:04:13] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 15:04:13] [Rank 0] Group 3 FTA: 0.1700 +[2025-09-05 15:04:13] [Rank 0] Group 4 FTA: 0.2000 +[2025-09-05 15:04:13] [Rank 0] Group 4 FTA: 0.2000 +[2025-09-05 15:04:13] [Rank 0] Group 5 FTA: 0.2000 +[2025-09-05 15:04:13] [Rank 0] Group 5 FTA: 0.2000 +[2025-09-05 15:04:13] [Rank 0] Group 6 FTA: 0.1700 +[2025-09-05 15:04:13] [Rank 0] Group 6 FTA: 0.1700 +[2025-09-05 15:04:13] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 15:04:13] [Rank 0] Group 7 FTA: 0.0900 +[2025-09-05 15:04:13] [Rank 0] Group 8 FTA: 0.1600 +[2025-09-05 15:04:13] [Rank 0] Group 8 FTA: 0.1600 +[2025-09-05 15:04:13] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-05 15:04:13] [Rank 0] Group 9 FTA: 0.1000 +[2025-09-05 15:04:13] [Rank 0] Group 10 FTA: 0.1000 +[2025-09-05 15:04:13] [Rank 0] Group 10 FTA: 0.1000 +[2025-09-05 15:04:13] [Rank 0] Group 11 FTA: 0.0900 +[2025-09-05 15:04:13] [Rank 0] Group 11 FTA: 0.0900 +[2025-09-05 15:04:13] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 15:04:13] [Rank 0] Group 12 FTA: 0.0800 +[2025-09-05 15:04:13] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 15:04:13] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 15:04:13] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 15:04:13] [Rank 0] Group 14 FTA: 0.1100 +[2025-09-05 15:04:13] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 15:04:13] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 15:04:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png +[2025-09-05 15:04:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png +[2025-09-05 15:04:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png +[2025-09-05 15:04:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png +[2025-09-05 15:04:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png +[2025-09-05 15:04:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png +[2025-09-05 15:04:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png +[2025-09-05 15:04:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png +[2025-09-05 15:04:15] [Rank 0] step:501/10000 train_time:65699ms step_avg:131.14ms +[2025-09-05 15:04:15] [Rank 0] step:501/10000 train_time:65699ms step_avg:131.14ms +[2025-09-05 
15:04:15] [Rank 0] step:521/10000 train_time:66366ms step_avg:127.38ms +[2025-09-05 15:04:15] [Rank 0] step:521/10000 train_time:66366ms step_avg:127.38ms +[2025-09-05 15:04:16] [Rank 0] step:541/10000 train_time:67092ms step_avg:124.02ms +[2025-09-05 15:04:16] [Rank 0] step:541/10000 train_time:67092ms step_avg:124.02ms +[2025-09-05 15:04:17] [Rank 0] step:561/10000 train_time:67819ms step_avg:120.89ms +[2025-09-05 15:04:17] [Rank 0] step:561/10000 train_time:67819ms step_avg:120.89ms +[2025-09-05 15:04:18] [Rank 0] step:581/10000 train_time:68546ms step_avg:117.98ms +[2025-09-05 15:04:18] [Rank 0] step:581/10000 train_time:68546ms step_avg:117.98ms +[2025-09-05 15:04:18] [Rank 0] step:601/10000 train_time:69273ms step_avg:115.26ms +[2025-09-05 15:04:18] [Rank 0] step:601/10000 train_time:69273ms step_avg:115.26ms +[2025-09-05 15:04:19] [Rank 0] step:621/10000 train_time:69999ms step_avg:112.72ms +[2025-09-05 15:04:19] [Rank 0] step:621/10000 train_time:69999ms step_avg:112.72ms +[2025-09-05 15:04:20] [Rank 0] step:641/10000 train_time:70726ms step_avg:110.34ms +[2025-09-05 15:04:20] [Rank 0] step:641/10000 train_time:70726ms step_avg:110.34ms +[2025-09-05 15:04:20] [Rank 0] step:661/10000 train_time:71453ms step_avg:108.10ms +[2025-09-05 15:04:20] [Rank 0] step:661/10000 train_time:71453ms step_avg:108.10ms +[2025-09-05 15:04:21] [Rank 0] step:681/10000 train_time:72180ms step_avg:105.99ms +[2025-09-05 15:04:21] [Rank 0] step:681/10000 train_time:72180ms step_avg:105.99ms +[2025-09-05 15:04:22] [Rank 0] step:701/10000 train_time:72907ms step_avg:104.00ms +[2025-09-05 15:04:22] [Rank 0] step:701/10000 train_time:72907ms step_avg:104.00ms +[2025-09-05 15:04:23] [Rank 0] step:721/10000 train_time:73636ms step_avg:102.13ms +[2025-09-05 15:04:23] [Rank 0] step:721/10000 train_time:73636ms step_avg:102.13ms +[2025-09-05 15:04:23] [Rank 0] step:741/10000 train_time:74363ms step_avg:100.36ms +[2025-09-05 15:04:23] [Rank 0] step:741/10000 train_time:74363ms step_avg:100.36ms +[2025-09-05 15:04:24] [Rank 0] step:761/10000 train_time:75095ms step_avg:98.68ms +[2025-09-05 15:04:24] [Rank 0] step:761/10000 train_time:75095ms step_avg:98.68ms +[2025-09-05 15:04:25] [Rank 0] step:781/10000 train_time:75827ms step_avg:97.09ms +[2025-09-05 15:04:25] [Rank 0] step:781/10000 train_time:75827ms step_avg:97.09ms +[2025-09-05 15:04:26] [Rank 0] step:801/10000 train_time:76559ms step_avg:95.58ms +[2025-09-05 15:04:26] [Rank 0] step:801/10000 train_time:76559ms step_avg:95.58ms +[2025-09-05 15:04:27] [Rank 0] step:821/10000 train_time:77904ms step_avg:94.89ms +[2025-09-05 15:04:27] [Rank 0] step:821/10000 train_time:77904ms step_avg:94.89ms +[2025-09-05 15:04:28] [Rank 0] step:841/10000 train_time:78636ms step_avg:93.50ms +[2025-09-05 15:04:28] [Rank 0] step:841/10000 train_time:78636ms step_avg:93.50ms +[2025-09-05 15:04:29] [Rank 0] step:861/10000 train_time:79496ms step_avg:92.33ms +[2025-09-05 15:04:29] [Rank 0] step:861/10000 train_time:79496ms step_avg:92.33ms +[2025-09-05 15:04:29] [Rank 0] step:881/10000 train_time:80227ms step_avg:91.06ms +[2025-09-05 15:04:29] [Rank 0] step:881/10000 train_time:80227ms step_avg:91.06ms +[2025-09-05 15:04:30] [Rank 0] step:901/10000 train_time:80958ms step_avg:89.85ms +[2025-09-05 15:04:30] [Rank 0] step:901/10000 train_time:80958ms step_avg:89.85ms +[2025-09-05 15:04:31] [Rank 0] step:921/10000 train_time:81841ms step_avg:88.86ms +[2025-09-05 15:04:31] [Rank 0] step:921/10000 train_time:81841ms step_avg:88.86ms +[2025-09-05 15:04:32] [Rank 0] step:941/10000 
train_time:82573ms step_avg:87.75ms +[2025-09-05 15:04:32] [Rank 0] step:941/10000 train_time:82573ms step_avg:87.75ms +[2025-09-05 15:04:32] [Rank 0] step:961/10000 train_time:83304ms step_avg:86.68ms +[2025-09-05 15:04:32] [Rank 0] step:961/10000 train_time:83304ms step_avg:86.68ms +[2025-09-05 15:04:33] [Rank 0] step:981/10000 train_time:84036ms step_avg:85.66ms +[2025-09-05 15:04:33] [Rank 0] step:981/10000 train_time:84036ms step_avg:85.66ms +[2025-09-05 15:04:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:04:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:04:34] [Rank 0] PRINT: step:1000/10000 train_loss:2.0578 val_loss:1.8674 train_time:84847ms step_avg:84.85ms +[2025-09-05 15:04:34] [Rank 0] PRINT: step:1000/10000 train_loss:2.0578 val_loss:1.8674 train_time:84847ms step_avg:84.85ms +[2025-09-05 15:04:34] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:04:34] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:04:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:04:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:05:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:05:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:05:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:05:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:05:55] [Rank 0] Total Loss: 4.2056 +[2025-09-05 15:05:55] [Rank 0] Total Loss: 4.2056 +[2025-09-05 15:05:55] [Rank 0] Total FTA (Unweighted): 0.3444 +[2025-09-05 15:05:55] [Rank 0] Total FTA (Unweighted): 0.3444 +[2025-09-05 15:05:55] [Rank 0] Total FTA (Weighted): 0.3444 +[2025-09-05 15:05:55] [Rank 0] Total FTA (Weighted): 0.3444 +[2025-09-05 15:05:55] [Rank 0] Group 0 Loss: 3.1568 +[2025-09-05 15:05:55] [Rank 0] Group 0 Loss: 3.1568 +[2025-09-05 15:05:55] [Rank 0] Group 1 Loss: 2.9857 +[2025-09-05 15:05:55] [Rank 0] Group 1 Loss: 2.9857 +[2025-09-05 15:05:55] [Rank 0] Group 2 Loss: 2.9861 +[2025-09-05 15:05:55] [Rank 0] Group 2 Loss: 2.9861 +[2025-09-05 15:05:55] [Rank 0] Group 3 Loss: 3.2988 +[2025-09-05 15:05:55] [Rank 0] Group 3 Loss: 3.2988 +[2025-09-05 15:05:55] [Rank 0] Group 4 Loss: 3.5104 +[2025-09-05 15:05:55] [Rank 0] Group 4 Loss: 3.5104 +[2025-09-05 15:05:55] [Rank 0] Group 5 Loss: 3.7950 +[2025-09-05 15:05:55] [Rank 0] Group 5 Loss: 3.7950 +[2025-09-05 15:05:55] [Rank 0] Group 6 Loss: 4.0986 +[2025-09-05 15:05:55] [Rank 0] Group 6 Loss: 4.0986 +[2025-09-05 15:05:55] [Rank 0] Group 7 Loss: 4.3397 +[2025-09-05 15:05:55] [Rank 0] Group 7 Loss: 4.3397 +[2025-09-05 15:05:55] [Rank 0] Group 8 Loss: 4.6641 +[2025-09-05 15:05:55] [Rank 0] Group 8 Loss: 4.6641 +[2025-09-05 15:05:55] [Rank 0] Group 9 Loss: 4.8036 +[2025-09-05 15:05:55] [Rank 0] Group 9 Loss: 4.8036 +[2025-09-05 15:05:55] [Rank 0] Group 10 Loss: 4.8994 +[2025-09-05 15:05:55] [Rank 0] Group 10 Loss: 4.8994 +[2025-09-05 15:05:55] [Rank 0] Group 11 Loss: 4.9500 +[2025-09-05 15:05:55] [Rank 0] Group 11 Loss: 4.9500 +[2025-09-05 15:05:55] [Rank 0] Group 12 Loss: 4.9226 +[2025-09-05 15:05:55] [Rank 0] Group 12 Loss: 4.9226 +[2025-09-05 15:05:55] [Rank 0] Group 13 Loss: 4.9655 +[2025-09-05 15:05:55] [Rank 0] Group 13 Loss: 4.9655 +[2025-09-05 15:05:55] [Rank 0] Group 14 Loss: 4.9835 +[2025-09-05 15:05:55] [Rank 0] Group 
14 Loss: 4.9835 +[2025-09-05 15:05:55] [Rank 0] Group 15 Loss: 4.9300 +[2025-09-05 15:05:55] [Rank 0] Group 15 Loss: 4.9300 +[2025-09-05 15:05:55] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:05:55] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:05:55] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:05:55] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:05:55] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:05:55] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:05:55] [Rank 0] Group 3 FTA: 0.4500 +[2025-09-05 15:05:55] [Rank 0] Group 3 FTA: 0.4500 +[2025-09-05 15:05:55] [Rank 0] Group 4 FTA: 0.3700 +[2025-09-05 15:05:55] [Rank 0] Group 4 FTA: 0.3700 +[2025-09-05 15:05:55] [Rank 0] Group 5 FTA: 0.3300 +[2025-09-05 15:05:55] [Rank 0] Group 5 FTA: 0.3300 +[2025-09-05 15:05:55] [Rank 0] Group 6 FTA: 0.3100 +[2025-09-05 15:05:55] [Rank 0] Group 6 FTA: 0.3100 +[2025-09-05 15:05:55] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 15:05:55] [Rank 0] Group 7 FTA: 0.1500 +[2025-09-05 15:05:55] [Rank 0] Group 8 FTA: 0.2400 +[2025-09-05 15:05:55] [Rank 0] Group 8 FTA: 0.2400 +[2025-09-05 15:05:55] [Rank 0] Group 9 FTA: 0.0900 +[2025-09-05 15:05:55] [Rank 0] Group 9 FTA: 0.0900 +[2025-09-05 15:05:55] [Rank 0] Group 10 FTA: 0.0800 +[2025-09-05 15:05:55] [Rank 0] Group 10 FTA: 0.0800 +[2025-09-05 15:05:55] [Rank 0] Group 11 FTA: 0.0700 +[2025-09-05 15:05:55] [Rank 0] Group 11 FTA: 0.0700 +[2025-09-05 15:05:55] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 15:05:55] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 15:05:55] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 15:05:55] [Rank 0] Group 13 FTA: 0.1300 +[2025-09-05 15:05:55] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 15:05:55] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 15:05:55] [Rank 0] Group 15 FTA: 0.0600 +[2025-09-05 15:05:55] [Rank 0] Group 15 FTA: 0.0600 +[2025-09-05 15:05:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png +[2025-09-05 15:05:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png +[2025-09-05 15:05:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png +[2025-09-05 15:05:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png +[2025-09-05 15:05:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png +[2025-09-05 15:05:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png +[2025-09-05 15:05:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png +[2025-09-05 15:05:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png +[2025-09-05 15:05:56] [Rank 0] step:1001/10000 train_time:84857ms step_avg:84.77ms +[2025-09-05 15:05:56] [Rank 0] step:1001/10000 train_time:84857ms step_avg:84.77ms +[2025-09-05 15:05:57] [Rank 0] step:1021/10000 train_time:85513ms step_avg:83.75ms +[2025-09-05 15:05:57] [Rank 0] step:1021/10000 train_time:85513ms step_avg:83.75ms +[2025-09-05 15:05:58] [Rank 0] step:1041/10000 
train_time:86245ms step_avg:82.85ms +[2025-09-05 15:05:58] [Rank 0] step:1041/10000 train_time:86245ms step_avg:82.85ms +[2025-09-05 15:05:58] [Rank 0] step:1061/10000 train_time:86977ms step_avg:81.98ms +[2025-09-05 15:05:58] [Rank 0] step:1061/10000 train_time:86977ms step_avg:81.98ms +[2025-09-05 15:05:59] [Rank 0] step:1081/10000 train_time:87708ms step_avg:81.14ms +[2025-09-05 15:05:59] [Rank 0] step:1081/10000 train_time:87708ms step_avg:81.14ms +[2025-09-05 15:06:00] [Rank 0] step:1101/10000 train_time:88440ms step_avg:80.33ms +[2025-09-05 15:06:00] [Rank 0] step:1101/10000 train_time:88440ms step_avg:80.33ms +[2025-09-05 15:06:00] [Rank 0] step:1121/10000 train_time:89172ms step_avg:79.55ms +[2025-09-05 15:06:00] [Rank 0] step:1121/10000 train_time:89172ms step_avg:79.55ms +[2025-09-05 15:06:01] [Rank 0] step:1141/10000 train_time:89904ms step_avg:78.79ms +[2025-09-05 15:06:01] [Rank 0] step:1141/10000 train_time:89904ms step_avg:78.79ms +[2025-09-05 15:06:02] [Rank 0] step:1161/10000 train_time:90637ms step_avg:78.07ms +[2025-09-05 15:06:02] [Rank 0] step:1161/10000 train_time:90637ms step_avg:78.07ms +[2025-09-05 15:06:03] [Rank 0] step:1181/10000 train_time:91369ms step_avg:77.37ms +[2025-09-05 15:06:03] [Rank 0] step:1181/10000 train_time:91369ms step_avg:77.37ms +[2025-09-05 15:06:03] [Rank 0] step:1201/10000 train_time:92101ms step_avg:76.69ms +[2025-09-05 15:06:03] [Rank 0] step:1201/10000 train_time:92101ms step_avg:76.69ms +[2025-09-05 15:06:04] [Rank 0] step:1221/10000 train_time:92833ms step_avg:76.03ms +[2025-09-05 15:06:04] [Rank 0] step:1221/10000 train_time:92833ms step_avg:76.03ms +[2025-09-05 15:06:05] [Rank 0] step:1241/10000 train_time:93566ms step_avg:75.40ms +[2025-09-05 15:06:05] [Rank 0] step:1241/10000 train_time:93566ms step_avg:75.40ms +[2025-09-05 15:06:06] [Rank 0] step:1261/10000 train_time:94297ms step_avg:74.78ms +[2025-09-05 15:06:06] [Rank 0] step:1261/10000 train_time:94297ms step_avg:74.78ms +[2025-09-05 15:06:06] [Rank 0] step:1281/10000 train_time:95029ms step_avg:74.18ms +[2025-09-05 15:06:06] [Rank 0] step:1281/10000 train_time:95029ms step_avg:74.18ms +[2025-09-05 15:06:07] [Rank 0] step:1301/10000 train_time:95761ms step_avg:73.61ms +[2025-09-05 15:06:07] [Rank 0] step:1301/10000 train_time:95761ms step_avg:73.61ms +[2025-09-05 15:06:08] [Rank 0] step:1321/10000 train_time:96493ms step_avg:73.05ms +[2025-09-05 15:06:08] [Rank 0] step:1321/10000 train_time:96493ms step_avg:73.05ms +[2025-09-05 15:06:09] [Rank 0] step:1341/10000 train_time:97225ms step_avg:72.50ms +[2025-09-05 15:06:09] [Rank 0] step:1341/10000 train_time:97225ms step_avg:72.50ms +[2025-09-05 15:06:09] [Rank 0] step:1361/10000 train_time:97956ms step_avg:71.97ms +[2025-09-05 15:06:09] [Rank 0] step:1361/10000 train_time:97956ms step_avg:71.97ms +[2025-09-05 15:06:10] [Rank 0] step:1381/10000 train_time:98688ms step_avg:71.46ms +[2025-09-05 15:06:10] [Rank 0] step:1381/10000 train_time:98688ms step_avg:71.46ms +[2025-09-05 15:06:11] [Rank 0] step:1401/10000 train_time:99420ms step_avg:70.96ms +[2025-09-05 15:06:11] [Rank 0] step:1401/10000 train_time:99420ms step_avg:70.96ms +[2025-09-05 15:06:11] [Rank 0] step:1421/10000 train_time:100152ms step_avg:70.48ms +[2025-09-05 15:06:11] [Rank 0] step:1421/10000 train_time:100152ms step_avg:70.48ms +[2025-09-05 15:06:12] [Rank 0] step:1441/10000 train_time:100884ms step_avg:70.01ms +[2025-09-05 15:06:12] [Rank 0] step:1441/10000 train_time:100884ms step_avg:70.01ms +[2025-09-05 15:06:13] [Rank 0] step:1461/10000 train_time:101616ms 
step_avg:69.55ms +[2025-09-05 15:06:13] [Rank 0] step:1461/10000 train_time:101616ms step_avg:69.55ms +[2025-09-05 15:06:14] [Rank 0] step:1481/10000 train_time:102347ms step_avg:69.11ms +[2025-09-05 15:06:14] [Rank 0] step:1481/10000 train_time:102347ms step_avg:69.11ms +[2025-09-05 15:06:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:06:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:06:15] [Rank 0] PRINT: step:1500/10000 train_loss:1.7778 val_loss:1.6889 train_time:103160ms step_avg:68.77ms +[2025-09-05 15:06:15] [Rank 0] PRINT: step:1500/10000 train_loss:1.7778 val_loss:1.6889 train_time:103160ms step_avg:68.77ms +[2025-09-05 15:06:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:06:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:06:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:06:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:07:37] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:07:37] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:07:37] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:07:37] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:07:37] [Rank 0] Total Loss: 4.1556 +[2025-09-05 15:07:37] [Rank 0] Total Loss: 4.1556 +[2025-09-05 15:07:37] [Rank 0] Total FTA (Unweighted): 0.3944 +[2025-09-05 15:07:37] [Rank 0] Total FTA (Unweighted): 0.3944 +[2025-09-05 15:07:37] [Rank 0] Total FTA (Weighted): 0.3944 +[2025-09-05 15:07:37] [Rank 0] Total FTA (Weighted): 0.3944 +[2025-09-05 15:07:37] [Rank 0] Group 0 Loss: 3.2658 +[2025-09-05 15:07:37] [Rank 0] Group 0 Loss: 3.2658 +[2025-09-05 15:07:37] [Rank 0] Group 1 Loss: 3.0351 +[2025-09-05 15:07:37] [Rank 0] Group 1 Loss: 3.0351 +[2025-09-05 15:07:37] [Rank 0] Group 2 Loss: 3.0429 +[2025-09-05 15:07:37] [Rank 0] Group 2 Loss: 3.0429 +[2025-09-05 15:07:37] [Rank 0] Group 3 Loss: 3.3423 +[2025-09-05 15:07:37] [Rank 0] Group 3 Loss: 3.3423 +[2025-09-05 15:07:37] [Rank 0] Group 4 Loss: 3.5378 +[2025-09-05 15:07:37] [Rank 0] Group 4 Loss: 3.5378 +[2025-09-05 15:07:37] [Rank 0] Group 5 Loss: 3.7474 +[2025-09-05 15:07:37] [Rank 0] Group 5 Loss: 3.7474 +[2025-09-05 15:07:37] [Rank 0] Group 6 Loss: 3.9439 +[2025-09-05 15:07:37] [Rank 0] Group 6 Loss: 3.9439 +[2025-09-05 15:07:37] [Rank 0] Group 7 Loss: 4.2354 +[2025-09-05 15:07:37] [Rank 0] Group 7 Loss: 4.2354 +[2025-09-05 15:07:37] [Rank 0] Group 8 Loss: 4.5383 +[2025-09-05 15:07:37] [Rank 0] Group 8 Loss: 4.5383 +[2025-09-05 15:07:37] [Rank 0] Group 9 Loss: 4.6840 +[2025-09-05 15:07:37] [Rank 0] Group 9 Loss: 4.6840 +[2025-09-05 15:07:37] [Rank 0] Group 10 Loss: 4.7949 +[2025-09-05 15:07:37] [Rank 0] Group 10 Loss: 4.7949 +[2025-09-05 15:07:37] [Rank 0] Group 11 Loss: 4.8253 +[2025-09-05 15:07:37] [Rank 0] Group 11 Loss: 4.8253 +[2025-09-05 15:07:37] [Rank 0] Group 12 Loss: 4.8364 +[2025-09-05 15:07:37] [Rank 0] Group 12 Loss: 4.8364 +[2025-09-05 15:07:37] [Rank 0] Group 13 Loss: 4.9072 +[2025-09-05 15:07:37] [Rank 0] Group 13 Loss: 4.9072 +[2025-09-05 15:07:37] [Rank 0] Group 14 Loss: 4.8815 +[2025-09-05 15:07:37] [Rank 0] Group 14 Loss: 4.8815 +[2025-09-05 15:07:37] [Rank 0] Group 15 Loss: 4.8718 +[2025-09-05 15:07:37] [Rank 0] Group 15 Loss: 4.8718 +[2025-09-05 15:07:37] [Rank 0] Group 0 FTA: 1.0000 
+[2025-09-05 15:07:37] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:07:37] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:07:37] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:07:37] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:07:37] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:07:37] [Rank 0] Group 3 FTA: 0.7400 +[2025-09-05 15:07:37] [Rank 0] Group 3 FTA: 0.7400 +[2025-09-05 15:07:37] [Rank 0] Group 4 FTA: 0.4500 +[2025-09-05 15:07:37] [Rank 0] Group 4 FTA: 0.4500 +[2025-09-05 15:07:37] [Rank 0] Group 5 FTA: 0.4600 +[2025-09-05 15:07:37] [Rank 0] Group 5 FTA: 0.4600 +[2025-09-05 15:07:37] [Rank 0] Group 6 FTA: 0.3800 +[2025-09-05 15:07:37] [Rank 0] Group 6 FTA: 0.3800 +[2025-09-05 15:07:37] [Rank 0] Group 7 FTA: 0.2600 +[2025-09-05 15:07:37] [Rank 0] Group 7 FTA: 0.2600 +[2025-09-05 15:07:37] [Rank 0] Group 8 FTA: 0.2600 +[2025-09-05 15:07:37] [Rank 0] Group 8 FTA: 0.2600 +[2025-09-05 15:07:37] [Rank 0] Group 9 FTA: 0.1800 +[2025-09-05 15:07:37] [Rank 0] Group 9 FTA: 0.1800 +[2025-09-05 15:07:37] [Rank 0] Group 10 FTA: 0.0800 +[2025-09-05 15:07:37] [Rank 0] Group 10 FTA: 0.0800 +[2025-09-05 15:07:37] [Rank 0] Group 11 FTA: 0.0900 +[2025-09-05 15:07:37] [Rank 0] Group 11 FTA: 0.0900 +[2025-09-05 15:07:37] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 15:07:37] [Rank 0] Group 12 FTA: 0.1100 +[2025-09-05 15:07:37] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 15:07:37] [Rank 0] Group 13 FTA: 0.1100 +[2025-09-05 15:07:37] [Rank 0] Group 14 FTA: 0.1000 +[2025-09-05 15:07:37] [Rank 0] Group 14 FTA: 0.1000 +[2025-09-05 15:07:37] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:07:37] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:07:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png +[2025-09-05 15:07:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png +[2025-09-05 15:07:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png +[2025-09-05 15:07:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png +[2025-09-05 15:07:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png +[2025-09-05 15:07:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png +[2025-09-05 15:07:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png +[2025-09-05 15:07:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png +[2025-09-05 15:07:38] [Rank 0] step:1501/10000 train_time:103169ms step_avg:68.73ms +[2025-09-05 15:07:38] [Rank 0] step:1501/10000 train_time:103169ms step_avg:68.73ms +[2025-09-05 15:07:39] [Rank 0] step:1521/10000 train_time:103832ms step_avg:68.27ms +[2025-09-05 15:07:39] [Rank 0] step:1521/10000 train_time:103832ms step_avg:68.27ms +[2025-09-05 15:07:40] [Rank 0] step:1541/10000 train_time:104564ms step_avg:67.85ms +[2025-09-05 15:07:40] [Rank 0] step:1541/10000 train_time:104564ms step_avg:67.85ms +[2025-09-05 15:07:41] [Rank 0] step:1561/10000 
train_time:105467ms step_avg:67.56ms +[2025-09-05 15:07:41] [Rank 0] step:1561/10000 train_time:105467ms step_avg:67.56ms +[2025-09-05 15:07:41] [Rank 0] step:1581/10000 train_time:106302ms step_avg:67.24ms +[2025-09-05 15:07:41] [Rank 0] step:1581/10000 train_time:106302ms step_avg:67.24ms +[2025-09-05 15:07:42] [Rank 0] step:1601/10000 train_time:107033ms step_avg:66.85ms +[2025-09-05 15:07:42] [Rank 0] step:1601/10000 train_time:107033ms step_avg:66.85ms +[2025-09-05 15:07:43] [Rank 0] step:1621/10000 train_time:107764ms step_avg:66.48ms +[2025-09-05 15:07:43] [Rank 0] step:1621/10000 train_time:107764ms step_avg:66.48ms +[2025-09-05 15:07:44] [Rank 0] step:1641/10000 train_time:109123ms step_avg:66.50ms +[2025-09-05 15:07:44] [Rank 0] step:1641/10000 train_time:109123ms step_avg:66.50ms +[2025-09-05 15:07:45] [Rank 0] step:1661/10000 train_time:109855ms step_avg:66.14ms +[2025-09-05 15:07:45] [Rank 0] step:1661/10000 train_time:109855ms step_avg:66.14ms +[2025-09-05 15:07:46] [Rank 0] step:1681/10000 train_time:110587ms step_avg:65.79ms +[2025-09-05 15:07:46] [Rank 0] step:1681/10000 train_time:110587ms step_avg:65.79ms +[2025-09-05 15:07:46] [Rank 0] step:1701/10000 train_time:111318ms step_avg:65.44ms +[2025-09-05 15:07:46] [Rank 0] step:1701/10000 train_time:111318ms step_avg:65.44ms +[2025-09-05 15:07:47] [Rank 0] step:1721/10000 train_time:112050ms step_avg:65.11ms +[2025-09-05 15:07:47] [Rank 0] step:1721/10000 train_time:112050ms step_avg:65.11ms +[2025-09-05 15:07:48] [Rank 0] step:1741/10000 train_time:112782ms step_avg:64.78ms +[2025-09-05 15:07:48] [Rank 0] step:1741/10000 train_time:112782ms step_avg:64.78ms +[2025-09-05 15:07:49] [Rank 0] step:1761/10000 train_time:113513ms step_avg:64.46ms +[2025-09-05 15:07:49] [Rank 0] step:1761/10000 train_time:113513ms step_avg:64.46ms +[2025-09-05 15:07:49] [Rank 0] step:1781/10000 train_time:114245ms step_avg:64.15ms +[2025-09-05 15:07:49] [Rank 0] step:1781/10000 train_time:114245ms step_avg:64.15ms +[2025-09-05 15:07:50] [Rank 0] step:1801/10000 train_time:114976ms step_avg:63.84ms +[2025-09-05 15:07:50] [Rank 0] step:1801/10000 train_time:114976ms step_avg:63.84ms +[2025-09-05 15:07:51] [Rank 0] step:1821/10000 train_time:115709ms step_avg:63.54ms +[2025-09-05 15:07:51] [Rank 0] step:1821/10000 train_time:115709ms step_avg:63.54ms +[2025-09-05 15:07:51] [Rank 0] step:1841/10000 train_time:116440ms step_avg:63.25ms +[2025-09-05 15:07:51] [Rank 0] step:1841/10000 train_time:116440ms step_avg:63.25ms +[2025-09-05 15:07:52] [Rank 0] step:1861/10000 train_time:117172ms step_avg:62.96ms +[2025-09-05 15:07:52] [Rank 0] step:1861/10000 train_time:117172ms step_avg:62.96ms +[2025-09-05 15:07:53] [Rank 0] step:1881/10000 train_time:117904ms step_avg:62.68ms +[2025-09-05 15:07:53] [Rank 0] step:1881/10000 train_time:117904ms step_avg:62.68ms +[2025-09-05 15:07:54] [Rank 0] step:1901/10000 train_time:118636ms step_avg:62.41ms +[2025-09-05 15:07:54] [Rank 0] step:1901/10000 train_time:118636ms step_avg:62.41ms +[2025-09-05 15:07:54] [Rank 0] step:1921/10000 train_time:119368ms step_avg:62.14ms +[2025-09-05 15:07:54] [Rank 0] step:1921/10000 train_time:119368ms step_avg:62.14ms +[2025-09-05 15:07:55] [Rank 0] step:1941/10000 train_time:120100ms step_avg:61.88ms +[2025-09-05 15:07:55] [Rank 0] step:1941/10000 train_time:120100ms step_avg:61.88ms +[2025-09-05 15:07:56] [Rank 0] step:1961/10000 train_time:120832ms step_avg:61.62ms +[2025-09-05 15:07:56] [Rank 0] step:1961/10000 train_time:120832ms step_avg:61.62ms +[2025-09-05 15:07:57] [Rank 0] 
+[2025-09-05 15:07:57] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:07:58] [Rank 0] PRINT: step:2000/10000 train_loss:1.6432 val_loss:1.5840 train_time:122375ms step_avg:61.19ms
+[2025-09-05 15:07:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:07:58] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:09:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:09:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:09:20] [Rank 0] Total Loss: 4.2051
+[2025-09-05 15:09:20] [Rank 0] Total FTA (Unweighted): 0.4431
+[2025-09-05 15:09:20] [Rank 0] Total FTA (Weighted): 0.4431
+[2025-09-05 15:09:20] [Rank 0] Group 0 Loss: 3.3804
+[2025-09-05 15:09:20] [Rank 0] Group 1 Loss: 3.2767
+[2025-09-05 15:09:20] [Rank 0] Group 2 Loss: 3.0816
+[2025-09-05 15:09:20] [Rank 0] Group 3 Loss: 3.4353
+[2025-09-05 15:09:20] [Rank 0] Group 4 Loss: 3.6215
+[2025-09-05 15:09:20] [Rank 0] Group 5 Loss: 3.7823
+[2025-09-05 15:09:20] [Rank 0] Group 6 Loss: 3.9605
+[2025-09-05 15:09:20] [Rank 0] Group 7 Loss: 4.2417
+[2025-09-05 15:09:20] [Rank 0] Group 8 Loss: 4.5451
+[2025-09-05 15:09:20] [Rank 0] Group 9 Loss: 4.6868
+[2025-09-05 15:09:20] [Rank 0] Group 10 Loss: 4.8327
+[2025-09-05 15:09:20] [Rank 0] Group 11 Loss: 4.8705
+[2025-09-05 15:09:20] [Rank 0] Group 12 Loss: 4.8415
+[2025-09-05 15:09:20] [Rank 0] Group 13 Loss: 4.9091
+[2025-09-05 15:09:20] [Rank 0] Group 14 Loss: 4.8888
+[2025-09-05 15:09:20] [Rank 0] Group 15 Loss: 4.9264
+[2025-09-05 15:09:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:09:20] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:09:20] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:09:20] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:09:20] [Rank 0] Group 4 FTA: 0.5200
+[2025-09-05 15:09:20] [Rank 0] Group 5 FTA: 0.5200
+[2025-09-05 15:09:20] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 15:09:20] [Rank 0] Group 7 FTA: 0.3700
+[2025-09-05 15:09:20] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 15:09:20] [Rank 0] Group 9 FTA: 0.2300
+[2025-09-05 15:09:20] [Rank 0] Group 10 FTA: 0.1600
+[2025-09-05 15:09:20] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 15:09:20] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 15:09:20] [Rank 0] Group 13 FTA: 0.1000
+[2025-09-05 15:09:20] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 15:09:20] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 15:09:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:09:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:09:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:09:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:09:22] [Rank 0] step:2001/10000 train_time:122385ms step_avg:61.16ms
+[2025-09-05 15:09:22] [Rank 0] step:2021/10000 train_time:123053ms step_avg:60.89ms
+[2025-09-05 15:09:23] [Rank 0] step:2041/10000 train_time:123784ms step_avg:60.65ms
+[2025-09-05 15:09:24] [Rank 0] step:2061/10000 train_time:124516ms step_avg:60.42ms
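The repeated val_tokens warning above is plain integer arithmetic: 491520 / 65536 = 7.5, so an evaluation loop that only consumes whole batches covers 7 × 65536 = 458752 tokens and could skip the remaining 32768. A minimal sketch of that check, assuming the evaluator floors to whole batches (variable names hypothetical):

```python
# Minimal sketch of the arithmetic behind the divisibility warning.
val_tokens, val_batch_size = 491520, 65536

full_batches, leftover = divmod(val_tokens, val_batch_size)
print(full_batches, leftover)  # 7 32768

# An evaluator that consumes only whole batches would cover
# 7 * 65536 = 458752 tokens and skip the remaining 32768.
if leftover:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
```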
+[2025-09-05 15:09:25] [Rank 0] step:2081/10000 train_time:125249ms step_avg:60.19ms
+[2025-09-05 15:09:25] [Rank 0] step:2101/10000 train_time:125980ms step_avg:59.96ms
+[2025-09-05 15:09:26] [Rank 0] step:2121/10000 train_time:126712ms step_avg:59.74ms
+[2025-09-05 15:09:27] [Rank 0] step:2141/10000 train_time:127444ms step_avg:59.53ms
+[2025-09-05 15:09:27] [Rank 0] step:2161/10000 train_time:128176ms step_avg:59.31ms
+[2025-09-05 15:09:28] [Rank 0] step:2181/10000 train_time:128907ms step_avg:59.10ms
+[2025-09-05 15:09:29] [Rank 0] step:2201/10000 train_time:129639ms step_avg:58.90ms
+[2025-09-05 15:09:30] [Rank 0] step:2221/10000 train_time:130371ms step_avg:58.70ms
+[2025-09-05 15:09:30] [Rank 0] step:2241/10000 train_time:131107ms step_avg:58.50ms
+[2025-09-05 15:09:31] [Rank 0] step:2261/10000 train_time:131846ms step_avg:58.31ms
+[2025-09-05 15:09:32] [Rank 0] step:2281/10000 train_time:132584ms step_avg:58.13ms
+[2025-09-05 15:09:33] [Rank 0] step:2301/10000 train_time:133322ms step_avg:57.94ms
+[2025-09-05 15:09:33] [Rank 0] step:2321/10000 train_time:134060ms step_avg:57.76ms
+[2025-09-05 15:09:34] [Rank 0] step:2341/10000 train_time:134797ms step_avg:57.58ms
+[2025-09-05 15:09:35] [Rank 0] step:2361/10000 train_time:135536ms step_avg:57.41ms
+[2025-09-05 15:09:36] [Rank 0] step:2381/10000 train_time:136274ms step_avg:57.23ms
+[2025-09-05 15:09:36] [Rank 0] step:2401/10000 train_time:137012ms step_avg:57.06ms
+[2025-09-05 15:09:37] [Rank 0] step:2421/10000 train_time:137750ms step_avg:56.90ms
+[2025-09-05 15:09:38] [Rank 0] step:2441/10000 train_time:138488ms step_avg:56.73ms
+[2025-09-05 15:09:39] [Rank 0] step:2461/10000 train_time:139227ms step_avg:56.57ms
+[2025-09-05 15:09:39] [Rank 0] step:2481/10000 train_time:139965ms step_avg:56.41ms
+[2025-09-05 15:09:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:09:40] [Rank 0] PRINT: step:2500/10000 train_loss:1.5550 val_loss:1.5057 train_time:140785ms step_avg:56.31ms
+[2025-09-05 15:09:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:09:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:11:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:11:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:11:02] [Rank 0] Total Loss: 4.0958
+[2025-09-05 15:11:02] [Rank 0] Total FTA (Unweighted): 0.4656
+[2025-09-05 15:11:02] [Rank 0] Total FTA (Weighted): 0.4656
+[2025-09-05 15:11:02] [Rank 0] Group 0 Loss: 3.3726
+[2025-09-05 15:11:02] [Rank 0] Group 1 Loss: 3.1651
+[2025-09-05 15:11:02] [Rank 0] Group 2 Loss: 3.0339
+[2025-09-05 15:11:02] [Rank 0] Group 3 Loss: 3.3947
+[2025-09-05 15:11:02] [Rank 0] Group 4 Loss: 3.5699
+[2025-09-05 15:11:02] [Rank 0] Group 5 Loss: 3.7509
+[2025-09-05 15:11:02] [Rank 0] Group 6 Loss: 3.8637
+[2025-09-05 15:11:02] [Rank 0] Group 7 Loss: 4.0968
+[2025-09-05 15:11:02] [Rank 0] Group 8 Loss: 4.3849
+[2025-09-05 15:11:02] [Rank 0] Group 9 Loss: 4.5110
+[2025-09-05 15:11:02] [Rank 0] Group 10 Loss: 4.6595
+[2025-09-05 15:11:02] [Rank 0] Group 11 Loss: 4.6867
+[2025-09-05 15:11:02] [Rank 0] Group 12 Loss: 4.7017
+[2025-09-05 15:11:02] [Rank 0] Group 13 Loss: 4.7815
+[2025-09-05 15:11:02] [Rank 0] Group 14 Loss: 4.7661
+[2025-09-05 15:11:02] [Rank 0] Group 15 Loss: 4.7934
+[2025-09-05 15:11:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:11:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:11:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:11:02] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:11:02] [Rank 0] Group 4 FTA: 0.5600
+[2025-09-05 15:11:02] [Rank 0] Group 5 FTA: 0.5500
+[2025-09-05 15:11:02] [Rank 0] Group 6 FTA: 0.4400
+[2025-09-05 15:11:02] [Rank 0] Group 7 FTA: 0.3900
+[2025-09-05 15:11:02] [Rank 0] Group 8 FTA: 0.3700
+[2025-09-05 15:11:02] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 15:11:02] [Rank 0] Group 10 FTA: 0.2700
+[2025-09-05 15:11:02] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 15:11:02] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 15:11:02] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 15:11:02] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 15:11:02] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:11:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:11:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:11:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:11:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:11:03] [Rank 0] step:2501/10000 train_time:140794ms step_avg:56.30ms
+[2025-09-05 15:11:04] [Rank 0] step:2521/10000 train_time:141466ms step_avg:56.12ms
+[2025-09-05 15:11:05] [Rank 0] step:2541/10000 train_time:142204ms step_avg:55.96ms
+[2025-09-05 15:11:05] [Rank 0] step:2561/10000 train_time:142942ms step_avg:55.81ms
+[2025-09-05 15:11:06] [Rank 0] step:2581/10000 train_time:143681ms step_avg:55.67ms
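The Total FTA (Unweighted) figures match a plain mean over the 16 per-group FTA values, and with the 1600-sample fixed-eval set split evenly across 16 groups (an assumption; the split is not printed in this log) the weighted mean coincides with the unweighted one, exactly as logged. A minimal sketch of that aggregation using the step-2500 values above:

```python
# Minimal sketch: aggregate the step-2500 per-group FTA values above.
# Assumes 16 equally sized groups (1600 fixed-eval samples / 16 = 100 each),
# which makes the weighted mean equal the unweighted mean, as in the log.
group_fta = [1.0, 1.0, 1.0, 1.0, 0.56, 0.55, 0.44, 0.39,
             0.37, 0.27, 0.27, 0.14, 0.12, 0.14, 0.13, 0.07]
group_sizes = [100] * 16  # assumption, not printed in the log

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
print(f"{unweighted:.4f} {weighted:.4f}")  # 0.4656 0.4656
```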
+[2025-09-05 15:11:07] [Rank 0] step:2601/10000 train_time:144419ms step_avg:55.52ms
+[2025-09-05 15:11:08] [Rank 0] step:2621/10000 train_time:145158ms step_avg:55.38ms
+[2025-09-05 15:11:08] [Rank 0] step:2641/10000 train_time:145895ms step_avg:55.24ms
+[2025-09-05 15:11:09] [Rank 0] step:2661/10000 train_time:146632ms step_avg:55.10ms
+[2025-09-05 15:11:10] [Rank 0] step:2681/10000 train_time:147370ms step_avg:54.97ms
+[2025-09-05 15:11:11] [Rank 0] step:2701/10000 train_time:148109ms step_avg:54.83ms
+[2025-09-05 15:11:11] [Rank 0] step:2721/10000 train_time:148847ms step_avg:54.70ms
+[2025-09-05 15:11:12] [Rank 0] step:2741/10000 train_time:149585ms step_avg:54.57ms
+[2025-09-05 15:11:13] [Rank 0] step:2761/10000 train_time:150323ms step_avg:54.45ms
+[2025-09-05 15:11:13] [Rank 0] step:2781/10000 train_time:151061ms step_avg:54.32ms
+[2025-09-05 15:11:14] [Rank 0] step:2801/10000 train_time:151799ms step_avg:54.19ms
+[2025-09-05 15:11:16] [Rank 0] step:2821/10000 train_time:153164ms step_avg:54.29ms
+[2025-09-05 15:11:16] [Rank 0] step:2841/10000 train_time:153901ms step_avg:54.17ms
+[2025-09-05 15:11:17] [Rank 0] step:2861/10000 train_time:154639ms step_avg:54.05ms
+[2025-09-05 15:11:18] [Rank 0] step:2881/10000 train_time:155378ms step_avg:53.93ms
+[2025-09-05 15:11:19] [Rank 0] step:2901/10000 train_time:156116ms step_avg:53.81ms
+[2025-09-05 15:11:19] [Rank 0] step:2921/10000 train_time:156854ms step_avg:53.70ms
+[2025-09-05 15:11:20] [Rank 0] step:2941/10000 train_time:157592ms step_avg:53.58ms
+[2025-09-05 15:11:21] [Rank 0] step:2961/10000 train_time:158330ms step_avg:53.47ms
+[2025-09-05 15:11:21] [Rank 0] step:2981/10000 train_time:159068ms step_avg:53.36ms
+[2025-09-05 15:11:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:11:23] [Rank 0] PRINT: step:3000/10000 train_loss:1.4910 val_loss:1.4621 train_time:159887ms step_avg:53.30ms
+[2025-09-05 15:11:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:11:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:12:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:12:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:12:43] [Rank 0] Total Loss: 4.2090
+[2025-09-05 15:12:43] [Rank 0] Total FTA (Unweighted): 0.4788
+[2025-09-05 15:12:43] [Rank 0] Total FTA (Weighted): 0.4788
+[2025-09-05 15:12:43] [Rank 0] Group 0 Loss: 3.4513
+[2025-09-05 15:12:43] [Rank 0] Group 1 Loss: 3.1790
+[2025-09-05 15:12:43] [Rank 0] Group 2 Loss: 3.1818
+[2025-09-05 15:12:43] [Rank 0] Group 3 Loss: 3.5215
+[2025-09-05 15:12:43] [Rank 0] Group 4 Loss: 3.6852
+[2025-09-05 15:12:43] [Rank 0] Group 5 Loss: 3.9003
+[2025-09-05 15:12:43] [Rank 0] Group 6 Loss: 3.9719
+[2025-09-05 15:12:43] [Rank 0] Group 7 Loss: 4.2065
+[2025-09-05 15:12:43] [Rank 0] Group 8 Loss: 4.5027
+[2025-09-05 15:12:43] [Rank 0] Group 9 Loss: 4.6293
+[2025-09-05 15:12:43] [Rank 0] Group 10 Loss: 4.8248
+[2025-09-05 15:12:43] [Rank 0] Group 11 Loss: 4.8034
+[2025-09-05 15:12:43] [Rank 0] Group 12 Loss: 4.8049
+[2025-09-05 15:12:43] [Rank 0] Group 13 Loss: 4.8456
+[2025-09-05 15:12:43] [Rank 0] Group 14 Loss: 4.9122
+[2025-09-05 15:12:43] [Rank 0] Group 15 Loss: 4.9236
+[2025-09-05 15:12:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:12:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:12:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:12:43] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:12:43] [Rank 0] Group 4 FTA: 0.5700
+[2025-09-05 15:12:43] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 15:12:43] [Rank 0] Group 6 FTA: 0.4400
+[2025-09-05 15:12:43] [Rank 0] Group 7 FTA: 0.4100
+[2025-09-05 15:12:43] [Rank 0] Group 8 FTA: 0.4000
+[2025-09-05 15:12:43] [Rank 0] Group 9 FTA: 0.2900
+[2025-09-05 15:12:43] [Rank 0] Group 10 FTA: 0.3500
+[2025-09-05 15:12:43] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 15:12:43] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 15:12:43] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 15:12:43] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 15:12:43] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 15:12:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:12:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:12:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:12:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:12:45] [Rank 0] step:3001/10000 train_time:159897ms step_avg:53.28ms
+[2025-09-05 15:12:46] [Rank 0] step:3021/10000 train_time:160565ms step_avg:53.15ms
+[2025-09-05 15:12:47] [Rank 0] step:3041/10000 train_time:161303ms step_avg:53.04ms
+[2025-09-05 15:12:47] [Rank 0] step:3061/10000 train_time:162041ms step_avg:52.94ms
+[2025-09-05 15:12:48] [Rank 0] step:3081/10000 train_time:162779ms step_avg:52.83ms
+[2025-09-05 15:12:49] [Rank 0] step:3101/10000 train_time:163517ms step_avg:52.73ms
+[2025-09-05 15:12:50] [Rank 0] step:3121/10000 train_time:164256ms step_avg:52.63ms
+[2025-09-05 15:12:50] [Rank 0] step:3141/10000 train_time:164994ms step_avg:52.53ms
+[2025-09-05 15:12:51] [Rank 0] step:3161/10000 train_time:165733ms step_avg:52.43ms
+[2025-09-05 15:12:52] [Rank 0] step:3181/10000 train_time:166471ms step_avg:52.33ms
+[2025-09-05 15:12:52] [Rank 0] step:3201/10000 train_time:167209ms step_avg:52.24ms
+[2025-09-05 15:12:53] [Rank 0] step:3221/10000 train_time:167947ms step_avg:52.14ms
+[2025-09-05 15:12:54] [Rank 0] step:3241/10000 train_time:168852ms step_avg:52.10ms
+[2025-09-05 15:12:55] [Rank 0] step:3261/10000 train_time:169590ms step_avg:52.01ms
+[2025-09-05 15:12:56] [Rank 0] step:3281/10000 train_time:170331ms step_avg:51.91ms
+[2025-09-05 15:12:56] [Rank 0] step:3301/10000 train_time:171220ms step_avg:51.87ms
+[2025-09-05 15:12:57] [Rank 0] step:3321/10000 train_time:171966ms step_avg:51.78ms
+[2025-09-05 15:12:58] [Rank 0] step:3341/10000 train_time:172704ms step_avg:51.69ms
+[2025-09-05 15:12:59] [Rank 0] step:3361/10000 train_time:173442ms step_avg:51.60ms
+[2025-09-05 15:12:59] [Rank 0] step:3381/10000 train_time:174180ms step_avg:51.52ms
+[2025-09-05 15:13:00] [Rank 0] step:3401/10000 train_time:174919ms step_avg:51.43ms
+[2025-09-05 15:13:01] [Rank 0] step:3421/10000 train_time:175657ms step_avg:51.35ms
+[2025-09-05 15:13:02] [Rank 0] step:3441/10000 train_time:176397ms step_avg:51.26ms
+[2025-09-05 15:13:02] [Rank 0] step:3461/10000 train_time:177136ms step_avg:51.18ms
+[2025-09-05 15:13:03] [Rank 0] step:3481/10000 train_time:177873ms step_avg:51.10ms
+[2025-09-05 15:13:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:13:04] [Rank 0] PRINT: step:3500/10000 train_loss:1.4601 val_loss:1.4440 train_time:178692ms step_avg:51.05ms
+[2025-09-05 15:13:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:13:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:14:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:14:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:14:25] [Rank 0] Total Loss: 3.9631
+[2025-09-05 15:14:25] [Rank 0] Total FTA (Unweighted): 0.4863
+[2025-09-05 15:14:25] [Rank 0] Total FTA (Weighted): 0.4863
+[2025-09-05 15:14:25] [Rank 0] Group 0 Loss: 3.1982
+[2025-09-05 15:14:25] [Rank 0] Group 1 Loss: 3.0634
+[2025-09-05 15:14:25] [Rank 0] Group 2 Loss: 2.9738
+[2025-09-05 15:14:25] [Rank 0] Group 3 Loss: 3.2771
+[2025-09-05 15:14:25] [Rank 0] Group 4 Loss: 3.4766
+[2025-09-05 15:14:25] [Rank 0] Group 5 Loss: 3.6706
+[2025-09-05 15:14:25] [Rank 0] Group 6 Loss: 3.7078
+[2025-09-05 15:14:25] [Rank 0] Group 7 Loss: 3.9518
+[2025-09-05 15:14:25] [Rank 0] Group 8 Loss: 4.2350
+[2025-09-05 15:14:25] [Rank 0] Group 9 Loss: 4.3863
+[2025-09-05 15:14:25] [Rank 0] Group 10 Loss: 4.5149
+[2025-09-05 15:14:25] [Rank 0] Group 11 Loss: 4.5184
+[2025-09-05 15:14:25] [Rank 0] Group 12 Loss: 4.5146
+[2025-09-05 15:14:25] [Rank 0] Group 13 Loss: 4.6066
+[2025-09-05 15:14:25] [Rank 0] Group 14 Loss: 4.6447
+[2025-09-05 15:14:25] [Rank 0] Group 15 Loss: 4.6695
+[2025-09-05 15:14:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:14:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:14:25] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:14:25] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:14:25] [Rank 0] Group 4 FTA: 0.6900
+[2025-09-05 15:14:25] [Rank 0] Group 5 FTA: 0.5600
+[2025-09-05 15:14:25] [Rank 0] Group 6 FTA: 0.4500
+[2025-09-05 15:14:25] [Rank 0] Group 7 FTA: 0.4000
+[2025-09-05 15:14:25] [Rank 0] Group 8 FTA: 0.4000
+[2025-09-05 15:14:25] [Rank 0] Group 9 FTA: 0.3000
+[2025-09-05 15:14:25] [Rank 0] Group 10 FTA: 0.3700
+[2025-09-05 15:14:25] [Rank 0] Group 11 FTA: 0.1800
+[2025-09-05 15:14:25] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 15:14:25] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 15:14:25] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 15:14:25] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:14:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:14:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:14:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:14:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:14:26] [Rank 0] step:3501/10000 train_time:178702ms step_avg:51.04ms
+[2025-09-05 15:14:27] [Rank 0] step:3521/10000 train_time:179372ms step_avg:50.94ms
+[2025-09-05 15:14:28] [Rank 0] step:3541/10000 train_time:180111ms step_avg:50.86ms
+[2025-09-05 15:14:29] [Rank 0] step:3561/10000 train_time:180848ms step_avg:50.79ms
+[2025-09-05 15:14:29] [Rank 0] step:3581/10000 train_time:181586ms step_avg:50.71ms
+[2025-09-05 15:14:30] [Rank 0] step:3601/10000 train_time:182323ms step_avg:50.63ms
+[2025-09-05 15:14:31] [Rank 0] step:3621/10000 train_time:183061ms step_avg:50.56ms
+[2025-09-05 15:14:32] [Rank 0] step:3641/10000 train_time:184409ms step_avg:50.65ms
+[2025-09-05 15:14:33] [Rank 0] step:3661/10000 train_time:185147ms step_avg:50.57ms
+[2025-09-05 15:14:34] [Rank 0] step:3681/10000 train_time:185885ms step_avg:50.50ms
+[2025-09-05 15:14:34] [Rank 0] step:3701/10000 train_time:186623ms step_avg:50.42ms
+[2025-09-05 15:14:35] [Rank 0] step:3721/10000 train_time:187360ms step_avg:50.35ms
+[2025-09-05 15:14:36] [Rank 0] step:3741/10000 train_time:188098ms step_avg:50.28ms
+[2025-09-05 15:14:37] [Rank 0] step:3761/10000 train_time:188836ms step_avg:50.21ms
+[2025-09-05 15:14:37] [Rank 0] step:3781/10000 train_time:189574ms step_avg:50.14ms
+[2025-09-05 15:14:38] [Rank 0] step:3801/10000 train_time:190312ms step_avg:50.07ms
+[2025-09-05 15:14:39] [Rank 0] step:3821/10000 train_time:191050ms step_avg:50.00ms
+[2025-09-05 15:14:40] [Rank 0] step:3841/10000 train_time:191787ms step_avg:49.93ms
+[2025-09-05 15:14:40] [Rank 0] step:3861/10000 train_time:192525ms step_avg:49.86ms
+[2025-09-05 15:14:41] [Rank 0] step:3881/10000 train_time:193263ms step_avg:49.80ms
+[2025-09-05 15:14:42] [Rank 0] step:3901/10000 train_time:194000ms step_avg:49.73ms
+[2025-09-05 15:14:42] [Rank 0] step:3921/10000 train_time:194738ms step_avg:49.67ms
+[2025-09-05 15:14:43] [Rank 0] step:3941/10000 train_time:195477ms step_avg:49.60ms
+[2025-09-05 15:14:44] [Rank 0] step:3961/10000 train_time:196214ms step_avg:49.54ms
+[2025-09-05 15:14:45] [Rank 0] step:3981/10000 train_time:196952ms step_avg:49.47ms
+[2025-09-05 15:14:45] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:14:46] [Rank 0] PRINT: step:4000/10000 train_loss:1.4453 val_loss:1.4294 train_time:197771ms step_avg:49.44ms
+[2025-09-05 15:14:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:14:46] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:16:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:16:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:16:07] [Rank 0] Total Loss: 4.0206
+[2025-09-05 15:16:07] [Rank 0] Total FTA (Unweighted): 0.5081
+[2025-09-05 15:16:07] [Rank 0] Total FTA (Weighted): 0.5081
+[2025-09-05 15:16:07] [Rank 0] Group 0 Loss: 3.3883
+[2025-09-05 15:16:07] [Rank 0] Group 1 Loss: 3.1020
+[2025-09-05 15:16:07] [Rank 0] Group 2 Loss: 2.9417
+[2025-09-05 15:16:07] [Rank 0] Group 3 Loss: 3.3753
+[2025-09-05 15:16:07] [Rank 0] Group 4 Loss: 3.5366
+[2025-09-05 15:16:07] [Rank 0] Group 5 Loss: 3.6898
+[2025-09-05 15:16:07] [Rank 0] Group 6 Loss: 3.7939
+[2025-09-05 15:16:07] [Rank 0] Group 7 Loss: 4.0007
+[2025-09-05 15:16:07] [Rank 0] Group 8 Loss: 4.3080
+[2025-09-05 15:16:07] [Rank 0] Group 9 Loss: 4.4171
+[2025-09-05 15:16:07] [Rank 0] Group 10 Loss: 4.5834
+[2025-09-05 15:16:07] [Rank 0] Group 11 Loss: 4.5773
+[2025-09-05 15:16:07] [Rank 0] Group 12 Loss: 4.5662
+[2025-09-05 15:16:07] [Rank 0] Group 13 Loss: 4.6419
+[2025-09-05 15:16:07] [Rank 0] Group 14 Loss: 4.6753
+[2025-09-05 15:16:07] [Rank 0] Group 15 Loss: 4.7316
+[2025-09-05 15:16:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:16:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:16:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:16:08] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:16:08] [Rank 0] Group 4 FTA: 0.7300
+[2025-09-05 15:16:08] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 15:16:08] [Rank 0] Group 6 FTA: 0.4900
+[2025-09-05 15:16:08] [Rank 0] Group 7 FTA: 0.4200
+[2025-09-05 15:16:08] [Rank 0] Group 8 FTA: 0.4400
+[2025-09-05 15:16:08] [Rank 0] Group 9 FTA: 0.3100
+[2025-09-05 15:16:08] [Rank 0] Group 10 FTA: 0.4300
+[2025-09-05 15:16:08] [Rank 0] Group 11 FTA: 0.2600
+[2025-09-05 15:16:08] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 15:16:08] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 15:16:08] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 15:16:08] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:16:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:16:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:16:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:16:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:16:09] [Rank 0] step:4001/10000 train_time:197780ms step_avg:49.43ms
+[2025-09-05 15:16:10] [Rank 0] step:4021/10000 train_time:199083ms step_avg:49.51ms
+[2025-09-05 15:16:11] [Rank 0] step:4041/10000 train_time:199821ms step_avg:49.45ms
+[2025-09-05 15:16:12] [Rank 0] step:4061/10000 train_time:200559ms step_avg:49.39ms
+[2025-09-05 15:16:13] [Rank 0] step:4081/10000 train_time:201296ms step_avg:49.33ms
+[2025-09-05 15:16:13] [Rank 0] step:4101/10000 train_time:202034ms step_avg:49.26ms
+[2025-09-05 15:16:14] [Rank 0] step:4121/10000 train_time:202773ms step_avg:49.20ms
+[2025-09-05 15:16:15] [Rank 0] step:4141/10000 train_time:203510ms step_avg:49.15ms
+[2025-09-05 15:16:16] [Rank 0] step:4161/10000 train_time:204248ms step_avg:49.09ms
+[2025-09-05 15:16:16] [Rank 0] step:4181/10000 train_time:204985ms step_avg:49.03ms
+[2025-09-05 15:16:17] [Rank 0] step:4201/10000 train_time:205723ms step_avg:48.97ms
+[2025-09-05 15:16:18] [Rank 0] step:4221/10000 train_time:206469ms step_avg:48.91ms
+[2025-09-05 15:16:19] [Rank 0] step:4241/10000 train_time:207208ms step_avg:48.86ms
+[2025-09-05 15:16:19] [Rank 0] step:4261/10000 train_time:207947ms step_avg:48.80ms
+[2025-09-05 15:16:20] [Rank 0] step:4281/10000 train_time:208685ms step_avg:48.75ms
+[2025-09-05 15:16:21] [Rank 0] step:4301/10000 train_time:209423ms step_avg:48.69ms
+[2025-09-05 15:16:21] [Rank 0] step:4321/10000 train_time:210161ms step_avg:48.64ms
+[2025-09-05 15:16:22] [Rank 0] step:4341/10000 train_time:210899ms step_avg:48.58ms
+[2025-09-05 15:16:23] [Rank 0] step:4361/10000 train_time:211637ms step_avg:48.53ms
+[2025-09-05 15:16:24] [Rank 0] step:4381/10000 train_time:212375ms step_avg:48.48ms
+[2025-09-05 15:16:24] [Rank 0] step:4401/10000 train_time:213113ms step_avg:48.42ms
+[2025-09-05 15:16:25] [Rank 0] step:4421/10000 train_time:213852ms step_avg:48.37ms
+[2025-09-05 15:16:26] [Rank 0] step:4441/10000 train_time:214590ms step_avg:48.32ms
+[2025-09-05 15:16:27] [Rank 0] step:4461/10000 train_time:215328ms step_avg:48.27ms
+[2025-09-05 15:16:27] [Rank 0] step:4481/10000 train_time:216066ms step_avg:48.22ms
+[2025-09-05 15:16:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:16:29] [Rank 0] PRINT: step:4500/10000 train_loss:1.4347 val_loss:1.4215 train_time:216885ms step_avg:48.20ms
+[2025-09-05 15:16:29] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:16:29] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:17:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:17:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:17:50] [Rank 0] Total Loss: 4.0588
+[2025-09-05 15:17:50] [Rank 0] Total FTA (Unweighted): 0.5294
+[2025-09-05 15:17:50] [Rank 0] Total FTA (Weighted): 0.5294
+[2025-09-05 15:17:50] [Rank 0] Group 0 Loss: 3.4211
+[2025-09-05 15:17:50] [Rank 0] Group 1 Loss: 3.0801
+[2025-09-05 15:17:50] [Rank 0] Group 2 Loss: 3.0485
+[2025-09-05 15:17:50] [Rank 0] Group 3 Loss: 3.4367
+[2025-09-05 15:17:50] [Rank 0] Group 4 Loss: 3.6072
+[2025-09-05 15:17:50] [Rank 0] Group 5 Loss: 3.8029
+[2025-09-05 15:17:50] [Rank 0] Group 6 Loss: 3.8490
+[2025-09-05 15:17:50] [Rank 0] Group 7 Loss: 3.9977
+[2025-09-05 15:17:50] [Rank 0] Group 8 Loss: 4.3199
+[2025-09-05 15:17:50] [Rank 0] Group 9 Loss: 4.4802
+[2025-09-05 15:17:50] [Rank 0] Group 10 Loss: 4.5808
+[2025-09-05 15:17:50] [Rank 0] Group 11 Loss: 4.6009
+[2025-09-05 15:17:50] [Rank 0] Group 12 Loss: 4.6115
+[2025-09-05 15:17:50] [Rank 0] Group 13 Loss: 4.6639
+[2025-09-05 15:17:50] [Rank 0] Group 14 Loss: 4.7014
+[2025-09-05 15:17:50] [Rank 0] Group 15 Loss: 4.7384
+[2025-09-05 15:17:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:17:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:17:50] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:17:50] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:17:50] [Rank 0] Group 4 FTA: 0.8700
+[2025-09-05 15:17:50] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 15:17:50] [Rank 0] Group 6 FTA: 0.5000
+[2025-09-05 15:17:50] [Rank 0] Group 7 FTA: 0.4600
+[2025-09-05 15:17:50] [Rank 0] Group 8 FTA: 0.4700
+[2025-09-05 15:17:50] [Rank 0] Group 9 FTA: 0.3900
+[2025-09-05 15:17:50] [Rank 0] Group 10 FTA: 0.4600
+[2025-09-05 15:17:50] [Rank 0] Group 11 FTA: 0.2500
+[2025-09-05 15:17:50] [Rank 0] Group 12 FTA: 0.1500
+[2025-09-05 15:17:50] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 15:17:50] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 15:17:50] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:17:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:17:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:17:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:17:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:17:51] [Rank 0] step:4501/10000 train_time:216895ms step_avg:48.19ms
+[2025-09-05 15:17:52] [Rank 0] step:4521/10000 train_time:217561ms step_avg:48.12ms
+[2025-09-05 15:17:53] [Rank 0] step:4541/10000 train_time:218299ms step_avg:48.07ms
+[2025-09-05 15:17:53] [Rank 0] step:4561/10000 train_time:219038ms step_avg:48.02ms
+[2025-09-05 15:17:54] [Rank 0] step:4581/10000 train_time:219776ms step_avg:47.98ms
+[2025-09-05 15:17:55] [Rank 0] step:4601/10000 train_time:220515ms step_avg:47.93ms
+[2025-09-05 15:17:56] [Rank 0] step:4621/10000 train_time:221253ms step_avg:47.88ms
[Rank 0] step:4621/10000 train_time:221253ms step_avg:47.88ms +[2025-09-05 15:17:56] [Rank 0] step:4641/10000 train_time:221991ms step_avg:47.83ms +[2025-09-05 15:17:56] [Rank 0] step:4641/10000 train_time:221991ms step_avg:47.83ms +[2025-09-05 15:17:57] [Rank 0] step:4661/10000 train_time:222731ms step_avg:47.79ms +[2025-09-05 15:17:57] [Rank 0] step:4661/10000 train_time:222731ms step_avg:47.79ms +[2025-09-05 15:17:58] [Rank 0] step:4681/10000 train_time:223469ms step_avg:47.74ms +[2025-09-05 15:17:58] [Rank 0] step:4681/10000 train_time:223469ms step_avg:47.74ms +[2025-09-05 15:17:59] [Rank 0] step:4701/10000 train_time:224207ms step_avg:47.69ms +[2025-09-05 15:17:59] [Rank 0] step:4701/10000 train_time:224207ms step_avg:47.69ms +[2025-09-05 15:17:59] [Rank 0] step:4721/10000 train_time:224945ms step_avg:47.65ms +[2025-09-05 15:17:59] [Rank 0] step:4721/10000 train_time:224945ms step_avg:47.65ms +[2025-09-05 15:18:00] [Rank 0] step:4741/10000 train_time:225684ms step_avg:47.60ms +[2025-09-05 15:18:00] [Rank 0] step:4741/10000 train_time:225684ms step_avg:47.60ms +[2025-09-05 15:18:01] [Rank 0] step:4761/10000 train_time:226423ms step_avg:47.56ms +[2025-09-05 15:18:01] [Rank 0] step:4761/10000 train_time:226423ms step_avg:47.56ms +[2025-09-05 15:18:02] [Rank 0] step:4781/10000 train_time:227161ms step_avg:47.51ms +[2025-09-05 15:18:02] [Rank 0] step:4781/10000 train_time:227161ms step_avg:47.51ms +[2025-09-05 15:18:02] [Rank 0] step:4801/10000 train_time:227899ms step_avg:47.47ms +[2025-09-05 15:18:02] [Rank 0] step:4801/10000 train_time:227899ms step_avg:47.47ms +[2025-09-05 15:18:03] [Rank 0] step:4821/10000 train_time:228637ms step_avg:47.43ms +[2025-09-05 15:18:03] [Rank 0] step:4821/10000 train_time:228637ms step_avg:47.43ms +[2025-09-05 15:18:04] [Rank 0] step:4841/10000 train_time:229687ms step_avg:47.45ms +[2025-09-05 15:18:04] [Rank 0] step:4841/10000 train_time:229687ms step_avg:47.45ms +[2025-09-05 15:18:05] [Rank 0] step:4861/10000 train_time:230425ms step_avg:47.40ms +[2025-09-05 15:18:05] [Rank 0] step:4861/10000 train_time:230425ms step_avg:47.40ms +[2025-09-05 15:18:06] [Rank 0] step:4881/10000 train_time:231163ms step_avg:47.36ms +[2025-09-05 15:18:06] [Rank 0] step:4881/10000 train_time:231163ms step_avg:47.36ms +[2025-09-05 15:18:06] [Rank 0] step:4901/10000 train_time:231902ms step_avg:47.32ms +[2025-09-05 15:18:06] [Rank 0] step:4901/10000 train_time:231902ms step_avg:47.32ms +[2025-09-05 15:18:07] [Rank 0] step:4921/10000 train_time:232641ms step_avg:47.28ms +[2025-09-05 15:18:07] [Rank 0] step:4921/10000 train_time:232641ms step_avg:47.28ms +[2025-09-05 15:18:08] [Rank 0] step:4941/10000 train_time:233379ms step_avg:47.23ms +[2025-09-05 15:18:08] [Rank 0] step:4941/10000 train_time:233379ms step_avg:47.23ms +[2025-09-05 15:18:09] [Rank 0] step:4961/10000 train_time:234117ms step_avg:47.19ms +[2025-09-05 15:18:09] [Rank 0] step:4961/10000 train_time:234117ms step_avg:47.19ms +[2025-09-05 15:18:09] [Rank 0] step:4981/10000 train_time:234856ms step_avg:47.15ms +[2025-09-05 15:18:09] [Rank 0] step:4981/10000 train_time:234856ms step_avg:47.15ms +[2025-09-05 15:18:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:18:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
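The recurring warning above fires whenever the fixed validation-token budget is not an integer multiple of the validation batch size. A minimal sketch of that kind of check (illustrative names, not the script's actual identifiers): here 491520 = 7 × 65536 + 32768, so an evaluator that only consumes full batches would skip the last 32768 tokens.

```python
# Hypothetical reconstruction of the divisibility check behind the warning.
val_tokens = 491520
val_batch_size = 65536

full_batches, remainder = divmod(val_tokens, val_batch_size)  # -> (7, 32768)
if remainder != 0:
    # Only `full_batches` complete batches fit; the trailing `remainder`
    # tokens would need a partial batch and may be dropped.
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
```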
+[2025-09-05 15:18:11] [Rank 0] PRINT: step:5000/10000 train_loss:1.4267 val_loss:1.4145 train_time:235810ms step_avg:47.16ms
+[2025-09-05 15:18:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:18:11] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:19:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:19:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:19:31] [Rank 0] Total Loss: 4.1354
+[2025-09-05 15:19:31] [Rank 0] Total FTA (Unweighted): 0.5325
+[2025-09-05 15:19:31] [Rank 0] Total FTA (Weighted): 0.5325
+[2025-09-05 15:19:31] [Rank 0] Group 0 Loss: 3.6482
+[2025-09-05 15:19:31] [Rank 0] Group 1 Loss: 3.2368
+[2025-09-05 15:19:31] [Rank 0] Group 2 Loss: 3.1201
+[2025-09-05 15:19:31] [Rank 0] Group 3 Loss: 3.5094
+[2025-09-05 15:19:31] [Rank 0] Group 4 Loss: 3.6746
+[2025-09-05 15:19:31] [Rank 0] Group 5 Loss: 3.8282
+[2025-09-05 15:19:31] [Rank 0] Group 6 Loss: 3.8958
+[2025-09-05 15:19:31] [Rank 0] Group 7 Loss: 4.0611
+[2025-09-05 15:19:31] [Rank 0] Group 8 Loss: 4.3833
+[2025-09-05 15:19:31] [Rank 0] Group 9 Loss: 4.5346
+[2025-09-05 15:19:31] [Rank 0] Group 10 Loss: 4.6762
+[2025-09-05 15:19:31] [Rank 0] Group 11 Loss: 4.6643
+[2025-09-05 15:19:31] [Rank 0] Group 12 Loss: 4.6665
+[2025-09-05 15:19:31] [Rank 0] Group 13 Loss: 4.7008
+[2025-09-05 15:19:31] [Rank 0] Group 14 Loss: 4.7631
+[2025-09-05 15:19:31] [Rank 0] Group 15 Loss: 4.8034
+[2025-09-05 15:19:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:19:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:19:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:19:31] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:19:31] [Rank 0] Group 4 FTA: 0.8800
+[2025-09-05 15:19:31] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 15:19:31] [Rank 0] Group 6 FTA: 0.5000
+[2025-09-05 15:19:31] [Rank 0] Group 7 FTA: 0.4200
+[2025-09-05 15:19:31] [Rank 0] Group 8 FTA: 0.4600
+[2025-09-05 15:19:31] [Rank 0] Group 9 FTA: 0.3700
+[2025-09-05 15:19:31] [Rank 0] Group 10 FTA: 0.4500
+[2025-09-05 15:19:31] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 15:19:31] [Rank 0] Group 12 FTA: 0.2100
+[2025-09-05 15:19:31] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 15:19:31] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 15:19:31] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:19:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:19:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:19:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:19:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:19:33] [Rank 0] step:5001/10000 train_time:235820ms step_avg:47.15ms
+[2025-09-05 15:19:34] [Rank 0] step:5021/10000 train_time:236503ms step_avg:47.10ms
+[2025-09-05 15:19:34] [Rank 0] step:5041/10000 train_time:237241ms step_avg:47.06ms
+[2025-09-05 15:19:35] [Rank 0] step:5061/10000 train_time:237979ms step_avg:47.02ms
+[2025-09-05 15:19:36] [Rank 0] step:5081/10000 train_time:238717ms step_avg:46.98ms
+[2025-09-05 15:19:37] [Rank 0] step:5101/10000 train_time:239455ms step_avg:46.94ms
+[2025-09-05 15:19:37] [Rank 0] step:5121/10000 train_time:240193ms step_avg:46.90ms
+[2025-09-05 15:19:38] [Rank 0] step:5141/10000 train_time:240931ms step_avg:46.86ms
+[2025-09-05 15:19:39] [Rank 0] step:5161/10000 train_time:241670ms step_avg:46.83ms
+[2025-09-05 15:19:39] [Rank 0] step:5181/10000 train_time:242408ms step_avg:46.79ms
+[2025-09-05 15:19:40] [Rank 0] step:5201/10000 train_time:243145ms step_avg:46.75ms
+[2025-09-05 15:19:41] [Rank 0] step:5221/10000 train_time:243883ms step_avg:46.71ms
+[2025-09-05 15:19:42] [Rank 0] step:5241/10000 train_time:244622ms step_avg:46.67ms
+[2025-09-05 15:19:42] [Rank 0] step:5261/10000 train_time:245360ms step_avg:46.64ms
+[2025-09-05 15:19:43] [Rank 0] step:5281/10000 train_time:246099ms step_avg:46.60ms
+[2025-09-05 15:19:44] [Rank 0] step:5301/10000 train_time:246836ms step_avg:46.56ms
+[2025-09-05 15:19:45] [Rank 0] step:5321/10000 train_time:247575ms step_avg:46.53ms
+[2025-09-05 15:19:45] [Rank 0] step:5341/10000 train_time:248313ms step_avg:46.49ms
+[2025-09-05 15:19:46] [Rank 0] step:5361/10000 train_time:249051ms step_avg:46.46ms
+[2025-09-05 15:19:47] [Rank 0] step:5381/10000 train_time:249789ms step_avg:46.42ms
+[2025-09-05 15:19:48] [Rank 0] step:5401/10000 train_time:250528ms step_avg:46.39ms
+[2025-09-05 15:19:48] [Rank 0] step:5421/10000 train_time:251266ms step_avg:46.35ms
+[2025-09-05 15:19:49] [Rank 0] step:5441/10000 train_time:252004ms step_avg:46.32ms
+[2025-09-05 15:19:50] [Rank 0] step:5461/10000 train_time:252742ms step_avg:46.28ms
+[2025-09-05 15:19:51] [Rank 0] step:5481/10000 train_time:253481ms step_avg:46.25ms
+[2025-09-05 15:19:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
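The step_avg field in the step lines is consistent with cumulative wall-clock training time divided by the step index. A hedged reconstruction, using the step:5481 values from the log above (field names are illustrative):

```python
# Sketch: step_avg appears to be cumulative train_time / current step.
train_time_ms = 253481   # cumulative training wall-clock time at this step
step = 5481
step_avg = train_time_ms / step
print(f"step:{step}/10000 train_time:{train_time_ms}ms step_avg:{step_avg:.2f}ms")
# -> step:5481/10000 train_time:253481ms step_avg:46.25ms, matching the log
```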
+[2025-09-05 15:19:52] [Rank 0] PRINT: step:5500/10000 train_loss:1.4202 val_loss:1.4091 train_time:254299ms step_avg:46.24ms
+[2025-09-05 15:19:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:19:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:21:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:21:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:21:13] [Rank 0] Total Loss: 4.0771
+[2025-09-05 15:21:13] [Rank 0] Total FTA (Unweighted): 0.5456
+[2025-09-05 15:21:13] [Rank 0] Total FTA (Weighted): 0.5456
+[2025-09-05 15:21:13] [Rank 0] Group 0 Loss: 3.4420
+[2025-09-05 15:21:13] [Rank 0] Group 1 Loss: 3.1203
+[2025-09-05 15:21:13] [Rank 0] Group 2 Loss: 3.1139
+[2025-09-05 15:21:13] [Rank 0] Group 3 Loss: 3.4612
+[2025-09-05 15:21:13] [Rank 0] Group 4 Loss: 3.6437
+[2025-09-05 15:21:13] [Rank 0] Group 5 Loss: 3.7845
+[2025-09-05 15:21:13] [Rank 0] Group 6 Loss: 3.8736
+[2025-09-05 15:21:13] [Rank 0] Group 7 Loss: 4.0398
+[2025-09-05 15:21:13] [Rank 0] Group 8 Loss: 4.3357
+[2025-09-05 15:21:13] [Rank 0] Group 9 Loss: 4.4815
+[2025-09-05 15:21:13] [Rank 0] Group 10 Loss: 4.5975
+[2025-09-05 15:21:13] [Rank 0] Group 11 Loss: 4.6129
+[2025-09-05 15:21:13] [Rank 0] Group 12 Loss: 4.6156
+[2025-09-05 15:21:13] [Rank 0] Group 13 Loss: 4.6648
+[2025-09-05 15:21:13] [Rank 0] Group 14 Loss: 4.7158
+[2025-09-05 15:21:13] [Rank 0] Group 15 Loss: 4.7308
+[2025-09-05 15:21:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:21:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:21:13] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:21:13] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:21:13] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 15:21:13] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 15:21:13] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:21:13] [Rank 0] Group 7 FTA: 0.4800
+[2025-09-05 15:21:13] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 15:21:13] [Rank 0] Group 9 FTA: 0.3900
+[2025-09-05 15:21:13] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 15:21:13] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 15:21:13] [Rank 0] Group 12 FTA: 0.1800
+[2025-09-05 15:21:13] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 15:21:13] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 15:21:13] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:21:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:21:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:21:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:21:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:21:15] [Rank 0] step:5501/10000 train_time:254309ms step_avg:46.23ms
+[2025-09-05 15:21:16] [Rank 0] step:5521/10000 train_time:254973ms step_avg:46.18ms
+[2025-09-05 15:21:16] [Rank 0] step:5541/10000 train_time:255711ms step_avg:46.15ms
+[2025-09-05 15:21:17] [Rank 0] step:5561/10000 train_time:256449ms step_avg:46.12ms
+[2025-09-05 15:21:18] [Rank 0] step:5581/10000 train_time:257188ms step_avg:46.08ms
+[2025-09-05 15:21:19] [Rank 0] step:5601/10000 train_time:257928ms step_avg:46.05ms
+[2025-09-05 15:21:19] [Rank 0] step:5621/10000 train_time:258666ms step_avg:46.02ms
+[2025-09-05 15:21:20] [Rank 0] step:5641/10000 train_time:259598ms step_avg:46.02ms
+[2025-09-05 15:21:21] [Rank 0] step:5661/10000 train_time:260336ms step_avg:45.99ms
+[2025-09-05 15:21:22] [Rank 0] step:5681/10000 train_time:261074ms step_avg:45.96ms
+[2025-09-05 15:21:23] [Rank 0] step:5701/10000 train_time:261954ms step_avg:45.95ms
+[2025-09-05 15:21:23] [Rank 0] step:5721/10000 train_time:262694ms step_avg:45.92ms
+[2025-09-05 15:21:24] [Rank 0] step:5741/10000 train_time:263432ms step_avg:45.89ms
+[2025-09-05 15:21:25] [Rank 0] step:5761/10000 train_time:264170ms step_avg:45.85ms
+[2025-09-05 15:21:26] [Rank 0] step:5781/10000 train_time:264909ms step_avg:45.82ms
+[2025-09-05 15:21:26] [Rank 0] step:5801/10000 train_time:265647ms step_avg:45.79ms
+[2025-09-05 15:21:27] [Rank 0] step:5821/10000 train_time:266385ms step_avg:45.76ms
+[2025-09-05 15:21:28] [Rank 0] step:5841/10000 train_time:267130ms step_avg:45.73ms
+[2025-09-05 15:21:28] [Rank 0] step:5861/10000 train_time:267868ms step_avg:45.70ms
+[2025-09-05 15:21:29] [Rank 0] step:5881/10000 train_time:268606ms step_avg:45.67ms
+[2025-09-05 15:21:30] [Rank 0] step:5901/10000 train_time:269344ms step_avg:45.64ms
+[2025-09-05 15:21:31] [Rank 0] step:5921/10000 train_time:270082ms step_avg:45.61ms
+[2025-09-05 15:21:31] [Rank 0] step:5941/10000 train_time:270820ms step_avg:45.58ms
+[2025-09-05 15:21:32] [Rank 0] step:5961/10000 train_time:271558ms step_avg:45.56ms
+[2025-09-05 15:21:33] [Rank 0] step:5981/10000 train_time:272295ms step_avg:45.53ms
+[2025-09-05 15:21:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:21:35] [Rank 0] PRINT: step:6000/10000 train_loss:1.4166 val_loss:1.4082 train_time:273114ms step_avg:45.52ms
+[2025-09-05 15:21:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:21:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:23:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:23:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:23:02] [Rank 0] Total Loss: 4.1112
+[2025-09-05 15:23:02] [Rank 0] Total FTA (Unweighted): 0.5519
+[2025-09-05 15:23:02] [Rank 0] Total FTA (Weighted): 0.5519
+[2025-09-05 15:23:02] [Rank 0] Group 0 Loss: 3.5053
+[2025-09-05 15:23:02] [Rank 0] Group 1 Loss: 3.1618
+[2025-09-05 15:23:02] [Rank 0] Group 2 Loss: 3.1369
+[2025-09-05 15:23:02] [Rank 0] Group 3 Loss: 3.5140
+[2025-09-05 15:23:02] [Rank 0] Group 4 Loss: 3.6647
+[2025-09-05 15:23:02] [Rank 0] Group 5 Loss: 3.8426
+[2025-09-05 15:23:02] [Rank 0] Group 6 Loss: 3.9179
+[2025-09-05 15:23:02] [Rank 0] Group 7 Loss: 4.0818
+[2025-09-05 15:23:02] [Rank 0] Group 8 Loss: 4.3466
+[2025-09-05 15:23:02] [Rank 0] Group 9 Loss: 4.4979
+[2025-09-05 15:23:02] [Rank 0] Group 10 Loss: 4.6534
+[2025-09-05 15:23:02] [Rank 0] Group 11 Loss: 4.6527
+[2025-09-05 15:23:02] [Rank 0] Group 12 Loss: 4.6388
+[2025-09-05 15:23:02] [Rank 0] Group 13 Loss: 4.6829
+[2025-09-05 15:23:02] [Rank 0] Group 14 Loss: 4.7315
+[2025-09-05 15:23:02] [Rank 0] Group 15 Loss: 4.7499
+[2025-09-05 15:23:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:23:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:23:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:23:02] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:23:02] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 15:23:02] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 15:23:02] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:23:02] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 15:23:02] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 15:23:02] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 15:23:02] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 15:23:02] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 15:23:02] [Rank 0] Group 12 FTA: 0.2200
+[2025-09-05 15:23:02] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 15:23:02] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 15:23:02] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:23:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:23:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:23:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:23:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:23:03] [Rank 0] step:6001/10000 train_time:273124ms step_avg:45.51ms
+[2025-09-05 15:23:04] [Rank 0] step:6021/10000 train_time:274432ms step_avg:45.58ms
+[2025-09-05 15:23:05] [Rank 0] step:6041/10000 train_time:275170ms step_avg:45.55ms
+[2025-09-05 15:23:06] [Rank 0] step:6061/10000 train_time:275909ms step_avg:45.52ms
+[2025-09-05 15:23:07] [Rank 0] step:6081/10000 train_time:276647ms step_avg:45.49ms
+[2025-09-05 15:23:07] [Rank 0] step:6101/10000 train_time:277385ms step_avg:45.47ms
+[2025-09-05 15:23:08] [Rank 0] step:6121/10000 train_time:278123ms step_avg:45.44ms
+[2025-09-05 15:23:09] [Rank 0] step:6141/10000 train_time:278861ms step_avg:45.41ms
+[2025-09-05 15:23:10] [Rank 0] step:6161/10000 train_time:279599ms step_avg:45.38ms
+[2025-09-05 15:23:10] [Rank 0] step:6181/10000 train_time:280337ms step_avg:45.35ms
+[2025-09-05 15:23:11] [Rank 0] step:6201/10000 train_time:281074ms step_avg:45.33ms
+[2025-09-05 15:23:12] [Rank 0] step:6221/10000 train_time:281813ms step_avg:45.30ms
+[2025-09-05 15:23:13] [Rank 0] step:6241/10000 train_time:282551ms step_avg:45.27ms
+[2025-09-05 15:23:13] [Rank 0] step:6261/10000 train_time:283289ms step_avg:45.25ms
+[2025-09-05 15:23:14] [Rank 0] step:6281/10000 train_time:284026ms step_avg:45.22ms
+[2025-09-05 15:23:15] [Rank 0] step:6301/10000 train_time:284765ms step_avg:45.19ms
+[2025-09-05 15:23:15] [Rank 0] step:6321/10000 train_time:285502ms step_avg:45.17ms
+[2025-09-05 15:23:16] [Rank 0] step:6341/10000 train_time:286241ms step_avg:45.14ms
+[2025-09-05 15:23:17] [Rank 0] step:6361/10000 train_time:286980ms step_avg:45.12ms
+[2025-09-05 15:23:18] [Rank 0] step:6381/10000 train_time:287718ms step_avg:45.09ms
+[2025-09-05 15:23:18] [Rank 0] step:6401/10000 train_time:288456ms step_avg:45.06ms
+[2025-09-05 15:23:19] [Rank 0] step:6421/10000 train_time:289195ms step_avg:45.04ms
+[2025-09-05 15:23:20] [Rank 0] step:6441/10000 train_time:289933ms step_avg:45.01ms
+[2025-09-05 15:23:21] [Rank 0] step:6461/10000 train_time:290671ms step_avg:44.99ms
+[2025-09-05 15:23:21] [Rank 0] step:6481/10000 train_time:291408ms step_avg:44.96ms
+[2025-09-05 15:23:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
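In every detailed evaluation here, "Total FTA (Unweighted)" and "Total FTA (Weighted)" are identical, which is what one would expect if the 1600 fixed-eval samples are split evenly across the 16 groups (100 each). A sketch of the two aggregations under that assumption, using the step-6000 per-group FTA values:

```python
# Hedged sketch: unweighted vs. sample-weighted FTA aggregation.
# Per-group FTA from the step-6000 eval above; group sizes are an assumption.
group_fta = [1.00, 1.00, 1.00, 1.00, 0.93, 0.59, 0.52, 0.50,
             0.50, 0.42, 0.49, 0.31, 0.22, 0.13, 0.14, 0.08]
group_sizes = [100] * 16  # assumed equal split of the 1600-sample eval set

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)
assert abs(unweighted - weighted) < 1e-12  # equal only because sizes are equal
print(f"{unweighted:.4f}")  # 0.5519, matching the logged totals
```

With unequal group sizes the two totals would diverge, so their persistent equality in these logs is itself evidence of the balanced eval split.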
+[2025-09-05 15:23:23] [Rank 0] PRINT: step:6500/10000 train_loss:1.4146 val_loss:1.4047 train_time:292227ms step_avg:44.96ms
+[2025-09-05 15:23:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:23:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:24:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:24:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:24:43] [Rank 0] Total Loss: 4.0238
+[2025-09-05 15:24:43] [Rank 0] Total FTA (Unweighted): 0.5513
+[2025-09-05 15:24:43] [Rank 0] Total FTA (Weighted): 0.5513
+[2025-09-05 15:24:43] [Rank 0] Group 0 Loss: 3.5047
+[2025-09-05 15:24:43] [Rank 0] Group 1 Loss: 3.0983
+[2025-09-05 15:24:43] [Rank 0] Group 2 Loss: 3.0321
+[2025-09-05 15:24:43] [Rank 0] Group 3 Loss: 3.4507
+[2025-09-05 15:24:43] [Rank 0] Group 4 Loss: 3.5460
+[2025-09-05 15:24:43] [Rank 0] Group 5 Loss: 3.7288
+[2025-09-05 15:24:43] [Rank 0] Group 6 Loss: 3.8205
+[2025-09-05 15:24:43] [Rank 0] Group 7 Loss: 3.9739
+[2025-09-05 15:24:43] [Rank 0] Group 8 Loss: 4.2634
+[2025-09-05 15:24:43] [Rank 0] Group 9 Loss: 4.3900
+[2025-09-05 15:24:43] [Rank 0] Group 10 Loss: 4.5260
+[2025-09-05 15:24:43] [Rank 0] Group 11 Loss: 4.5574
+[2025-09-05 15:24:43] [Rank 0] Group 12 Loss: 4.5581
+[2025-09-05 15:24:43] [Rank 0] Group 13 Loss: 4.6115
+[2025-09-05 15:24:43] [Rank 0] Group 14 Loss: 4.6410
+[2025-09-05 15:24:43] [Rank 0] Group 15 Loss: 4.6784
+[2025-09-05 15:24:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:24:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:24:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:24:43] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:24:43] [Rank 0] Group 4 FTA: 0.9400
+[2025-09-05 15:24:43] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 15:24:43] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:24:43] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 15:24:43] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 15:24:43] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 15:24:43] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 15:24:43] [Rank 0] Group 11 FTA: 0.3300
+[2025-09-05 15:24:43] [Rank 0] Group 12 FTA: 0.2500
+[2025-09-05 15:24:43] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 15:24:43] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 15:24:43] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 15:24:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:24:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:24:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:24:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:24:45] [Rank 0] step:6501/10000 train_time:292237ms step_avg:44.95ms
+[2025-09-05 15:24:46] [Rank 0] step:6521/10000 train_time:292919ms step_avg:44.92ms
+[2025-09-05 15:24:46] [Rank 0] step:6541/10000 train_time:293657ms step_avg:44.89ms
+[2025-09-05 15:24:47] [Rank 0] step:6561/10000 train_time:294395ms step_avg:44.87ms
+[2025-09-05 15:24:48] [Rank 0] step:6581/10000 train_time:295133ms step_avg:44.85ms
+[2025-09-05 15:24:49] [Rank 0] step:6601/10000 train_time:295870ms step_avg:44.82ms
+[2025-09-05 15:24:49] [Rank 0] step:6621/10000 train_time:296608ms step_avg:44.80ms
+[2025-09-05 15:24:50] [Rank 0] step:6641/10000 train_time:297347ms step_avg:44.77ms
+[2025-09-05 15:24:51] [Rank 0] step:6661/10000 train_time:298085ms step_avg:44.75ms
+[2025-09-05 15:24:52] [Rank 0] step:6681/10000 train_time:298824ms step_avg:44.73ms
+[2025-09-05 15:24:52] [Rank 0] step:6701/10000 train_time:299561ms step_avg:44.70ms
+[2025-09-05 15:24:53] [Rank 0] step:6721/10000 train_time:300299ms step_avg:44.68ms
+[2025-09-05 15:24:54] [Rank 0] step:6741/10000 train_time:301037ms step_avg:44.66ms
+[2025-09-05 15:24:55] [Rank 0] step:6761/10000 train_time:301775ms step_avg:44.63ms
+[2025-09-05 15:24:55] [Rank 0] step:6781/10000 train_time:302513ms step_avg:44.61ms
+[2025-09-05 15:24:56] [Rank 0] step:6801/10000 train_time:303251ms step_avg:44.59ms
+[2025-09-05 15:24:57] [Rank 0] step:6821/10000 train_time:303989ms step_avg:44.57ms
+[2025-09-05 15:24:58] [Rank 0] step:6841/10000 train_time:305357ms step_avg:44.64ms
+[2025-09-05 15:24:59] [Rank 0] step:6861/10000 train_time:306095ms step_avg:44.61ms
+[2025-09-05 15:25:00] [Rank 0] step:6881/10000 train_time:306832ms step_avg:44.59ms
+[2025-09-05 15:25:00] [Rank 0] step:6901/10000 train_time:307571ms step_avg:44.57ms
+[2025-09-05 15:25:01] [Rank 0] step:6921/10000 train_time:308309ms step_avg:44.55ms
+[2025-09-05 15:25:02] [Rank 0] step:6941/10000 train_time:309047ms step_avg:44.52ms
+[2025-09-05 15:25:03] [Rank 0] step:6961/10000 train_time:309786ms step_avg:44.50ms
+[2025-09-05 15:25:03] [Rank 0] step:6981/10000 train_time:310525ms step_avg:44.48ms
+[2025-09-05 15:25:04] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
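The [✓] lines report that the four curve PNGs are rewritten to the same paths after every detailed evaluation, so the image files always reflect the latest history. A minimal sketch of that refresh pattern, assuming matplotlib (function and variable names are illustrative, not the script's actual code):

```python
# Sketch: overwrite a fixed PNG path after each eval so the plot stays current.
import matplotlib.pyplot as plt

def save_total_loss_curve(steps, losses, out_path):
    fig, ax = plt.subplots()
    ax.plot(steps, losses, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel("total detailed loss")
    fig.savefig(out_path)  # same path every time -> file is updated in place
    plt.close(fig)

# Values taken from the evals logged above.
save_total_loss_curve([5000, 5500, 6000, 6500],
                      [4.1354, 4.0771, 4.1112, 4.0238],
                      "total_loss_curve.png")
```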
+[2025-09-05 15:25:05] [Rank 0] PRINT: step:7000/10000 train_loss:1.4120 val_loss:1.4033 train_time:311344ms step_avg:44.48ms
+[2025-09-05 15:25:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:25:05] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:26:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:26:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:26:31] [Rank 0] Total Loss: 4.0449
+[2025-09-05 15:26:31] [Rank 0] Total FTA (Unweighted): 0.5606
+[2025-09-05 15:26:31] [Rank 0] Total FTA (Weighted): 0.5606
+[2025-09-05 15:26:31] [Rank 0] Group 0 Loss: 3.4847
+[2025-09-05 15:26:31] [Rank 0] Group 1 Loss: 3.1283
+[2025-09-05 15:26:31] [Rank 0] Group 2 Loss: 3.0618
+[2025-09-05 15:26:31] [Rank 0] Group 3 Loss: 3.4494
+[2025-09-05 15:26:31] [Rank 0] Group 4 Loss: 3.6098
+[2025-09-05 15:26:31] [Rank 0] Group 5 Loss: 3.7659
+[2025-09-05 15:26:31] [Rank 0] Group 6 Loss: 3.8137
+[2025-09-05 15:26:31] [Rank 0] Group 7 Loss: 4.0000
+[2025-09-05 15:26:31] [Rank 0] Group 8 Loss: 4.3374
+[2025-09-05 15:26:31] [Rank 0] Group 9 Loss: 4.4189
+[2025-09-05 15:26:31] [Rank 0] Group 10 Loss: 4.5733
+[2025-09-05 15:26:31] [Rank 0] Group 11 Loss: 4.5557
+[2025-09-05 15:26:31] [Rank 0] Group 12 Loss: 4.5796
+[2025-09-05 15:26:31] [Rank 0] Group 13 Loss: 4.6209
+[2025-09-05 15:26:31] [Rank 0] Group 14 Loss: 4.6381
+[2025-09-05 15:26:31] [Rank 0] Group 15 Loss: 4.6808
+[2025-09-05 15:26:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:26:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:26:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:26:31] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:26:31] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 15:26:31] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 15:26:31] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:26:31] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 15:26:31] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 15:26:31] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 15:26:31] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 15:26:31] [Rank 0] Group 11 FTA: 0.3600
+[2025-09-05 15:26:31] [Rank 0] Group 12 FTA: 0.2900
+[2025-09-05 15:26:31] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 15:26:31] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 15:26:31] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:26:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:26:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:26:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:26:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:26:32] [Rank 0] step:7001/10000 train_time:311354ms step_avg:44.47ms
+[2025-09-05 15:26:33] [Rank 0] step:7021/10000 train_time:312016ms step_avg:44.44ms
+[2025-09-05 15:26:34] [Rank 0] step:7041/10000 train_time:312754ms step_avg:44.42ms
+[2025-09-05 15:26:35] [Rank 0] step:7061/10000 train_time:313493ms step_avg:44.40ms
+[2025-09-05 15:26:35] [Rank 0] step:7081/10000 train_time:314231ms step_avg:44.38ms
+[2025-09-05 15:26:36] [Rank 0] step:7101/10000 train_time:315086ms step_avg:44.37ms
+[2025-09-05 15:26:37] [Rank 0] step:7121/10000 train_time:315824ms step_avg:44.35ms
+[2025-09-05 15:26:38] [Rank 0] step:7141/10000 train_time:316562ms step_avg:44.33ms
+[2025-09-05 15:26:39] [Rank 0] step:7161/10000 train_time:317415ms step_avg:44.33ms
+[2025-09-05 15:26:39] [Rank 0] step:7181/10000 train_time:318159ms step_avg:44.31ms
+[2025-09-05 15:26:40] [Rank 0] step:7201/10000 train_time:318897ms step_avg:44.29ms
+[2025-09-05 15:26:41] [Rank 0] step:7221/10000 train_time:319636ms step_avg:44.26ms
+[2025-09-05 15:26:42] [Rank 0] step:7241/10000 train_time:320374ms step_avg:44.24ms
+[2025-09-05 15:26:42] [Rank 0] step:7261/10000 train_time:321113ms step_avg:44.22ms
+[2025-09-05 15:26:43] [Rank 0] step:7281/10000 train_time:321851ms step_avg:44.20ms
+[2025-09-05 15:26:44] [Rank 0] step:7301/10000 train_time:322589ms step_avg:44.18ms
+[2025-09-05 15:26:44] [Rank 0] step:7321/10000 train_time:323328ms step_avg:44.16ms
+[2025-09-05 15:26:45] [Rank 0] step:7341/10000 train_time:324066ms step_avg:44.14ms
+[2025-09-05 15:26:46] [Rank 0] step:7361/10000 train_time:324805ms step_avg:44.13ms
+[2025-09-05 15:26:47] [Rank 0] step:7381/10000 train_time:325542ms step_avg:44.11ms
+[2025-09-05 15:26:47] [Rank 0] step:7401/10000 train_time:326280ms step_avg:44.09ms
+[2025-09-05 15:26:48] [Rank 0] step:7421/10000 train_time:327019ms step_avg:44.07ms
+[2025-09-05 15:26:49] [Rank 0] step:7441/10000 train_time:327758ms step_avg:44.05ms
+[2025-09-05 15:26:50] [Rank 0] step:7461/10000 train_time:328497ms step_avg:44.03ms
+[2025-09-05 15:26:50] [Rank 0] step:7481/10000 train_time:329235ms step_avg:44.01ms
+[2025-09-05 15:26:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:26:52] [Rank 0] PRINT: step:7500/10000 train_loss:1.4113 val_loss:1.4027 train_time:330055ms step_avg:44.01ms
+[2025-09-05 15:26:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:26:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:28:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:28:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:28:18] [Rank 0] Total Loss: 4.1285
+[2025-09-05 15:28:18] [Rank 0] Total FTA (Unweighted): 0.5713
+[2025-09-05 15:28:18] [Rank 0] Total FTA (Weighted): 0.5713
+[2025-09-05 15:28:18] [Rank 0] Group 0 Loss: 3.5749
+[2025-09-05 15:28:18] [Rank 0] Group 1 Loss: 3.1460
+[2025-09-05 15:28:18] [Rank 0] Group 2 Loss: 3.1286
+[2025-09-05 15:28:18] [Rank 0] Group 3 Loss: 3.5317
+[2025-09-05 15:28:18] [Rank 0] Group 4 Loss: 3.6714
+[2025-09-05 15:28:18] [Rank 0] Group 5 Loss: 3.8455
+[2025-09-05 15:28:18] [Rank 0] Group 6 Loss: 3.9487
+[2025-09-05 15:28:18] [Rank 0] Group 7 Loss: 4.0811
+[2025-09-05 15:28:18] [Rank 0] Group 8 Loss: 4.4106
+[2025-09-05 15:28:18] [Rank 0] Group 9 Loss: 4.5006
+[2025-09-05 15:28:18] [Rank 0] Group 10 Loss: 4.6790
+[2025-09-05 15:28:18] [Rank 0] Group 11 Loss: 4.6728
+[2025-09-05 15:28:18] [Rank 0] Group 12 Loss: 4.6541
+[2025-09-05 15:28:18] [Rank 0] Group 13 Loss: 4.6901
+[2025-09-05 15:28:18] [Rank 0] Group 14 Loss: 4.7477
+[2025-09-05 15:28:18] [Rank 0] Group 15 Loss: 4.7724
+[2025-09-05 15:28:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:28:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:28:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:28:18] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:28:18] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 15:28:18] [Rank 0] Group 5 FTA: 0.6000
+[2025-09-05 15:28:18] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:28:18] [Rank 0] Group 7 FTA: 0.4900
+[2025-09-05 15:28:18] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 15:28:18] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 15:28:18] [Rank 0] Group 10 FTA: 0.5100
+[2025-09-05 15:28:18] [Rank 0] Group 11 FTA: 0.3600
+[2025-09-05 15:28:18] [Rank 0] Group 12 FTA: 0.3400
+[2025-09-05 15:28:18] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 15:28:18] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 15:28:18] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:28:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:28:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:28:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:28:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:28:20] [Rank 0] step:7501/10000 train_time:330065ms step_avg:44.00ms
+[2025-09-05 15:28:20] [Rank 0] step:7521/10000 train_time:330739ms step_avg:43.98ms
+[2025-09-05 15:28:21] [Rank 0] step:7541/10000 train_time:331477ms step_avg:43.96ms
+[2025-09-05 15:28:22] [Rank 0] step:7561/10000 train_time:332215ms step_avg:43.94ms
+[2025-09-05 15:28:23] [Rank 0] step:7581/10000 train_time:332953ms step_avg:43.92ms
+[2025-09-05 15:28:23] [Rank 0] step:7601/10000 train_time:333691ms step_avg:43.90ms
+[2025-09-05 15:28:24] [Rank 0] step:7621/10000 train_time:334428ms step_avg:43.88ms
+[2025-09-05 15:28:25] [Rank 0] step:7641/10000 train_time:335167ms step_avg:43.86ms
+[2025-09-05 15:28:26] [Rank 0] step:7661/10000 train_time:336512ms step_avg:43.93ms
+[2025-09-05 15:28:27] [Rank 0] step:7681/10000 train_time:337250ms step_avg:43.91ms
+[2025-09-05 15:28:28] [Rank 0] step:7701/10000 train_time:337988ms step_avg:43.89ms
+[2025-09-05 15:28:28] [Rank 0] step:7721/10000 train_time:338727ms step_avg:43.87ms
+[2025-09-05 15:28:29] [Rank 0] step:7741/10000 train_time:339465ms step_avg:43.85ms
+[2025-09-05 15:28:30] [Rank 0] step:7761/10000 train_time:340203ms step_avg:43.83ms
+[2025-09-05 15:28:31] [Rank 0] step:7781/10000 train_time:340942ms step_avg:43.82ms
+[2025-09-05 15:28:31] [Rank 0] step:7801/10000 train_time:341680ms step_avg:43.80ms
+[2025-09-05 15:28:32] [Rank 0] step:7821/10000 train_time:342418ms step_avg:43.78ms
+[2025-09-05 15:28:33] [Rank 0] step:7841/10000 train_time:343156ms step_avg:43.76ms
+[2025-09-05 15:28:34] [Rank 0] step:7861/10000 train_time:343894ms step_avg:43.75ms
+[2025-09-05 15:28:34] [Rank 0] step:7881/10000 train_time:344633ms step_avg:43.73ms
+[2025-09-05 15:28:35] [Rank 0] step:7901/10000 train_time:345370ms step_avg:43.71ms
+[2025-09-05 15:28:36] [Rank 0] step:7921/10000 train_time:346109ms step_avg:43.70ms
+[2025-09-05 15:28:37] [Rank 0] step:7941/10000 train_time:346848ms step_avg:43.68ms
+[2025-09-05 15:28:37] [Rank 0] step:7961/10000 train_time:347586ms step_avg:43.66ms
+[2025-09-05 15:28:38] [Rank 0] step:7981/10000 train_time:348324ms step_avg:43.64ms
+[2025-09-05 15:28:39] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
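
Total FTA (Unweighted) and Total FTA (Weighted) coincide in every block of this log. That is expected if, as the config suggests, the fixed eval set holds per_group_k = 100 samples for each of the 16 groups (1600 total), since equal group sizes make the two averages identical. A sketch with the step-7500 values (the aggregation rule is an assumption inferred from the numbers, not copied from the script):

group_fta = [1.00, 1.00, 1.00, 1.00, 0.93, 0.60, 0.52, 0.49,
             0.50, 0.43, 0.51, 0.36, 0.34, 0.22, 0.17, 0.07]  # groups 0..15 above
group_size = [100] * 16                                       # per_group_k samples each

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_size)) / sum(group_size)
print(unweighted, weighted)  # both 0.57125, i.e. the logged 0.5713 up to rounding
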
+[2025-09-05 15:28:39] [Rank 0] PRINT: step:8000/10000 train_loss:1.4118 val_loss:1.4035 train_time:349143ms step_avg:43.64ms
+[2025-09-05 15:28:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:28:39] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:30:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:30:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:30:06] [Rank 0] Total Loss: 4.0864
+[2025-09-05 15:30:06] [Rank 0] Total FTA (Unweighted): 0.5700
+[2025-09-05 15:30:06] [Rank 0] Total FTA (Weighted): 0.5700
+[2025-09-05 15:30:06] [Rank 0] Group 0 Loss: 3.5993
+[2025-09-05 15:30:06] [Rank 0] Group 1 Loss: 3.1473
+[2025-09-05 15:30:06] [Rank 0] Group 2 Loss: 3.1691
+[2025-09-05 15:30:06] [Rank 0] Group 3 Loss: 3.4961
+[2025-09-05 15:30:06] [Rank 0] Group 4 Loss: 3.5969
+[2025-09-05 15:30:06] [Rank 0] Group 5 Loss: 3.8133
+[2025-09-05 15:30:06] [Rank 0] Group 6 Loss: 3.8679
+[2025-09-05 15:30:06] [Rank 0] Group 7 Loss: 4.0510
+[2025-09-05 15:30:06] [Rank 0] Group 8 Loss: 4.3515
+[2025-09-05 15:30:06] [Rank 0] Group 9 Loss: 4.4448
+[2025-09-05 15:30:06] [Rank 0] Group 10 Loss: 4.5900
+[2025-09-05 15:30:06] [Rank 0] Group 11 Loss: 4.6131
+[2025-09-05 15:30:06] [Rank 0] Group 12 Loss: 4.6309
+[2025-09-05 15:30:06] [Rank 0] Group 13 Loss: 4.6366
+[2025-09-05 15:30:06] [Rank 0] Group 14 Loss: 4.6779
+[2025-09-05 15:30:06] [Rank 0] Group 15 Loss: 4.6959
+[2025-09-05 15:30:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:30:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:30:06] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:30:06] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:30:06] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 15:30:06] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 15:30:06] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:30:06] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 15:30:06] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 15:30:06] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 15:30:06] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 15:30:06] [Rank 0] Group 11 FTA: 0.3500
+[2025-09-05 15:30:06] [Rank 0] Group 12 FTA: 0.3500
+[2025-09-05 15:30:06] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 15:30:06] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 15:30:06] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 15:30:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:30:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:30:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:30:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:30:07] [Rank 0] step:8001/10000 train_time:349153ms step_avg:43.64ms
+[2025-09-05 15:30:09] [Rank 0] step:8021/10000 train_time:350451ms step_avg:43.69ms
+[2025-09-05 15:30:09] [Rank 0] step:8041/10000 train_time:351189ms step_avg:43.67ms
+[2025-09-05 15:30:10] [Rank 0] step:8061/10000 train_time:351928ms step_avg:43.66ms
+[2025-09-05 15:30:11] [Rank 0] step:8081/10000 train_time:352666ms step_avg:43.64ms
+[2025-09-05 15:30:12] [Rank 0] step:8101/10000 train_time:353404ms step_avg:43.62ms
+[2025-09-05 15:30:12] [Rank 0] step:8121/10000 train_time:354143ms step_avg:43.61ms
+[2025-09-05 15:30:13] [Rank 0] step:8141/10000 train_time:354882ms step_avg:43.59ms
+[2025-09-05 15:30:14] [Rank 0] step:8161/10000 train_time:355620ms step_avg:43.58ms
+[2025-09-05 15:30:15] [Rank 0] step:8181/10000 train_time:356358ms step_avg:43.56ms
+[2025-09-05 15:30:15] [Rank 0] step:8201/10000 train_time:357096ms step_avg:43.54ms
+[2025-09-05 15:30:16] [Rank 0] step:8221/10000 train_time:357834ms step_avg:43.53ms
+[2025-09-05 15:30:17] [Rank 0] step:8241/10000 train_time:358573ms step_avg:43.51ms
+[2025-09-05 15:30:18] [Rank 0] step:8261/10000 train_time:359311ms step_avg:43.49ms
+[2025-09-05 15:30:18] [Rank 0] step:8281/10000 train_time:360049ms step_avg:43.48ms
+[2025-09-05 15:30:19] [Rank 0] step:8301/10000 train_time:360787ms step_avg:43.46ms
+[2025-09-05 15:30:20] [Rank 0] step:8321/10000 train_time:361526ms step_avg:43.45ms
+[2025-09-05 15:30:21] [Rank 0] step:8341/10000 train_time:362265ms step_avg:43.43ms
+[2025-09-05 15:30:21] [Rank 0] step:8361/10000 train_time:363004ms step_avg:43.42ms
+[2025-09-05 15:30:22] [Rank 0] step:8381/10000 train_time:363742ms step_avg:43.40ms
+[2025-09-05 15:30:23] [Rank 0] step:8401/10000 train_time:364480ms step_avg:43.39ms
+[2025-09-05 15:30:24] [Rank 0] step:8421/10000 train_time:365219ms step_avg:43.37ms
+[2025-09-05 15:30:24] [Rank 0] step:8441/10000 train_time:365957ms step_avg:43.35ms
+[2025-09-05 15:30:25] [Rank 0] step:8461/10000 train_time:366695ms step_avg:43.34ms
+[2025-09-05 15:30:26] [Rank 0] step:8481/10000 train_time:367434ms step_avg:43.32ms
+[2025-09-05 15:30:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
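
These detailed-evaluation blocks land every 500 steps because of val_loss_every = 500 in the logged Hyperparameters. A small sketch of the implied schedule (the exact trigger condition is an assumption inferred from the log):

val_loss_every, num_iterations = 500, 10_000
eval_steps = list(range(val_loss_every, num_iterations + 1, val_loss_every))
print(eval_steps[-6:])  # [7500, 8000, 8500, 9000, 9500, 10000], the blocks in this file
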
+[2025-09-05 15:30:27] [Rank 0] PRINT: step:8500/10000 train_loss:1.4092 val_loss:1.3981 train_time:368254ms step_avg:43.32ms
+[2025-09-05 15:30:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:30:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:31:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:31:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:31:53] [Rank 0] Total Loss: 4.0373
+[2025-09-05 15:31:53] [Rank 0] Total FTA (Unweighted): 0.5744
+[2025-09-05 15:31:53] [Rank 0] Total FTA (Weighted): 0.5744
+[2025-09-05 15:31:53] [Rank 0] Group 0 Loss: 3.5465
+[2025-09-05 15:31:53] [Rank 0] Group 1 Loss: 3.1114
+[2025-09-05 15:31:53] [Rank 0] Group 2 Loss: 3.1299
+[2025-09-05 15:31:53] [Rank 0] Group 3 Loss: 3.5085
+[2025-09-05 15:31:53] [Rank 0] Group 4 Loss: 3.5702
+[2025-09-05 15:31:53] [Rank 0] Group 5 Loss: 3.7543
+[2025-09-05 15:31:53] [Rank 0] Group 6 Loss: 3.8074
+[2025-09-05 15:31:53] [Rank 0] Group 7 Loss: 4.0014
+[2025-09-05 15:31:53] [Rank 0] Group 8 Loss: 4.2686
+[2025-09-05 15:31:53] [Rank 0] Group 9 Loss: 4.3785
+[2025-09-05 15:31:53] [Rank 0] Group 10 Loss: 4.5365
+[2025-09-05 15:31:53] [Rank 0] Group 11 Loss: 4.5426
+[2025-09-05 15:31:53] [Rank 0] Group 12 Loss: 4.5774
+[2025-09-05 15:31:53] [Rank 0] Group 13 Loss: 4.6049
+[2025-09-05 15:31:53] [Rank 0] Group 14 Loss: 4.6117
+[2025-09-05 15:31:53] [Rank 0] Group 15 Loss: 4.6471
+[2025-09-05 15:31:54] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:31:54] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:31:54] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:31:54] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:31:54] [Rank 0] Group 4 FTA: 0.9600
+[2025-09-05 15:31:54] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 15:31:54] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:31:54] [Rank 0] Group 7 FTA: 0.4900
+[2025-09-05 15:31:54] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 15:31:54] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 15:31:54] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 15:31:54] [Rank 0] Group 11 FTA: 0.3600
+[2025-09-05 15:31:54] [Rank 0] Group 12 FTA: 0.3800
+[2025-09-05 15:31:54] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 15:31:54] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 15:31:54] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:31:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:31:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:31:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:31:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:31:55] [Rank 0] step:8501/10000 train_time:368263ms step_avg:43.32ms
+[2025-09-05 15:31:56] [Rank 0] step:8521/10000 train_time:368930ms step_avg:43.30ms
+[2025-09-05 15:31:57] [Rank 0] step:8541/10000 train_time:369667ms step_avg:43.28ms
+[2025-09-05 15:31:57] [Rank 0] step:8561/10000 train_time:370406ms step_avg:43.27ms
+[2025-09-05 15:31:58] [Rank 0] step:8581/10000 train_time:371143ms step_avg:43.25ms
+[2025-09-05 15:31:59] [Rank 0] step:8601/10000 train_time:371891ms step_avg:43.24ms
+[2025-09-05 15:31:59] [Rank 0] step:8621/10000 train_time:372630ms step_avg:43.22ms
+[2025-09-05 15:32:00] [Rank 0] step:8641/10000 train_time:373468ms step_avg:43.22ms
+[2025-09-05 15:32:01] [Rank 0] step:8661/10000 train_time:374206ms step_avg:43.21ms
+[2025-09-05 15:32:02] [Rank 0] step:8681/10000 train_time:374944ms step_avg:43.19ms
+[2025-09-05 15:32:03] [Rank 0] step:8701/10000 train_time:375683ms step_avg:43.18ms
+[2025-09-05 15:32:03] [Rank 0] step:8721/10000 train_time:376422ms step_avg:43.16ms
+[2025-09-05 15:32:04] [Rank 0] step:8741/10000 train_time:377160ms step_avg:43.15ms
+[2025-09-05 15:32:05] [Rank 0] step:8761/10000 train_time:377899ms step_avg:43.13ms
+[2025-09-05 15:32:05] [Rank 0] step:8781/10000 train_time:378637ms step_avg:43.12ms
+[2025-09-05 15:32:06] [Rank 0] step:8801/10000 train_time:379376ms step_avg:43.11ms
+[2025-09-05 15:32:07] [Rank 0] step:8821/10000 train_time:380114ms step_avg:43.09ms
+[2025-09-05 15:32:08] [Rank 0] step:8841/10000 train_time:381484ms step_avg:43.15ms
+[2025-09-05 15:32:09] [Rank 0] step:8861/10000 train_time:382222ms step_avg:43.14ms
+[2025-09-05 15:32:10] [Rank 0] step:8881/10000 train_time:382961ms step_avg:43.12ms
+[2025-09-05 15:32:11] [Rank 0] step:8901/10000 train_time:383699ms step_avg:43.11ms
+[2025-09-05 15:32:11] [Rank 0] step:8921/10000 train_time:384437ms step_avg:43.09ms
+[2025-09-05 15:32:12] [Rank 0] step:8941/10000 train_time:385177ms step_avg:43.08ms
+[2025-09-05 15:32:13] [Rank 0] step:8961/10000 train_time:385916ms step_avg:43.07ms
+[2025-09-05 15:32:13] [Rank 0] step:8981/10000 train_time:386654ms step_avg:43.05ms
+[2025-09-05 15:32:14] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
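
The step_avg field is consistent with cumulative train_time divided by the step index; re-deriving it for the step:9000 line just below (the formula is inferred from the numbers, not taken from the script):

train_time_ms, step = 387_473, 9_000
print(f"{train_time_ms / step:.2f}ms")  # 43.05ms, matching the logged step_avg
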
+[2025-09-05 15:32:15] [Rank 0] PRINT: step:9000/10000 train_loss:1.4045 val_loss:1.3939 train_time:387473ms step_avg:43.05ms
+[2025-09-05 15:32:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:32:15] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:33:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:33:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:33:35] [Rank 0] Total Loss: 4.1368
+[2025-09-05 15:33:35] [Rank 0] Total FTA (Unweighted): 0.5737
+[2025-09-05 15:33:35] [Rank 0] Total FTA (Weighted): 0.5737
+[2025-09-05 15:33:35] [Rank 0] Group 0 Loss: 3.6181
+[2025-09-05 15:33:35] [Rank 0] Group 1 Loss: 3.2223
+[2025-09-05 15:33:35] [Rank 0] Group 2 Loss: 3.2117
+[2025-09-05 15:33:35] [Rank 0] Group 3 Loss: 3.5696
+[2025-09-05 15:33:35] [Rank 0] Group 4 Loss: 3.6711
+[2025-09-05 15:33:35] [Rank 0] Group 5 Loss: 3.8446
+[2025-09-05 15:33:35] [Rank 0] Group 6 Loss: 3.9268
+[2025-09-05 15:33:35] [Rank 0] Group 7 Loss: 4.0943
+[2025-09-05 15:33:35] [Rank 0] Group 8 Loss: 4.3806
+[2025-09-05 15:33:35] [Rank 0] Group 9 Loss: 4.4860
+[2025-09-05 15:33:35] [Rank 0] Group 10 Loss: 4.6499
+[2025-09-05 15:33:35] [Rank 0] Group 11 Loss: 4.6640
+[2025-09-05 15:33:35] [Rank 0] Group 12 Loss: 4.6760
+[2025-09-05 15:33:35] [Rank 0] Group 13 Loss: 4.7122
+[2025-09-05 15:33:35] [Rank 0] Group 14 Loss: 4.7183
+[2025-09-05 15:33:35] [Rank 0] Group 15 Loss: 4.7437
+[2025-09-05 15:33:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:33:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:33:35] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:33:35] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:33:35] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 15:33:35] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 15:33:35] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:33:35] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 15:33:35] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 15:33:35] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 15:33:35] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 15:33:35] [Rank 0] Group 11 FTA: 0.3700
+[2025-09-05 15:33:35] [Rank 0] Group 12 FTA: 0.3900
+[2025-09-05 15:33:35] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-05 15:33:35] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 15:33:35] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 15:33:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:33:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:33:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:33:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:33:37] [Rank 0] step:9001/10000 train_time:387483ms step_avg:43.05ms
+[2025-09-05 15:33:38] [Rank 0] step:9021/10000 train_time:388162ms step_avg:43.03ms
+[2025-09-05 15:33:38] [Rank 0] step:9041/10000 train_time:388900ms step_avg:43.02ms
+[2025-09-05 15:33:39] [Rank 0] step:9061/10000 train_time:389638ms step_avg:43.00ms
+[2025-09-05 15:33:40] [Rank 0] step:9081/10000 train_time:390376ms step_avg:42.99ms
+[2025-09-05 15:33:41] [Rank 0] step:9101/10000 train_time:391114ms step_avg:42.97ms
+[2025-09-05 15:33:41] [Rank 0] step:9121/10000 train_time:391852ms step_avg:42.96ms
+[2025-09-05 15:33:42] [Rank 0] step:9141/10000 train_time:392695ms step_avg:42.96ms
+[2025-09-05 15:33:43] [Rank 0] step:9161/10000 train_time:393433ms step_avg:42.95ms
+[2025-09-05 15:33:44] [Rank 0] step:9181/10000 train_time:394174ms step_avg:42.93ms
+[2025-09-05 15:33:44] [Rank 0] step:9201/10000 train_time:394912ms step_avg:42.92ms
+[2025-09-05 15:33:45] [Rank 0] step:9221/10000 train_time:395650ms step_avg:42.91ms
+[2025-09-05 15:33:46] [Rank 0] step:9241/10000 train_time:396389ms step_avg:42.89ms
+[2025-09-05 15:33:47] [Rank 0] step:9261/10000 train_time:397127ms step_avg:42.88ms
+[2025-09-05 15:33:47] [Rank 0] step:9281/10000 train_time:397866ms step_avg:42.87ms
+[2025-09-05 15:33:48] [Rank 0] step:9301/10000 train_time:398604ms step_avg:42.86ms
+[2025-09-05 15:33:49] [Rank 0] step:9321/10000 train_time:399342ms step_avg:42.84ms
+[2025-09-05 15:33:50] [Rank 0] step:9341/10000 train_time:400183ms step_avg:42.84ms
+[2025-09-05 15:33:50] [Rank 0] step:9361/10000 train_time:400920ms step_avg:42.83ms
+[2025-09-05 15:33:51] [Rank 0] step:9381/10000 train_time:401759ms step_avg:42.83ms
+[2025-09-05 15:33:52] [Rank 0] step:9401/10000 train_time:402496ms step_avg:42.81ms
+[2025-09-05 15:33:53] [Rank 0] step:9421/10000 train_time:403235ms step_avg:42.80ms
+[2025-09-05 15:33:53] [Rank 0] step:9441/10000 train_time:403972ms step_avg:42.79ms
+[2025-09-05 15:33:54] [Rank 0] step:9461/10000 train_time:404711ms step_avg:42.78ms
+[2025-09-05 15:33:55] [Rank 0] step:9481/10000 train_time:405449ms step_avg:42.76ms
+[2025-09-05 15:33:56] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
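
For post-hoc analysis, the periodic summary lines in a training_log_*.txt like this one are easy to scrape. A hypothetical helper (not part of the repo; the regex targets the PRINT summary format seen above):

import re

SUMMARY = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_summary_lines(path: str):
    """Return (step, train_loss, val_loss) tuples from one log file."""
    points = []
    with open(path) as f:
        for line in f:
            m = SUMMARY.search(line)
            if m:
                points.append((int(m[1]), float(m[2]), float(m[3])))
    return points

# parse_summary_lines(...)[-1] on this log would give (10000, 1.3941, 1.3828)
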
+[2025-09-05 15:33:56] [Rank 0] PRINT: step:9500/10000 train_loss:1.3996 val_loss:1.3895 train_time:406268ms step_avg:42.77ms
+[2025-09-05 15:33:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:33:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:35:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:35:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:35:18] [Rank 0] Total Loss: 4.0658
+[2025-09-05 15:35:18] [Rank 0] Total FTA (Unweighted): 0.5881
+[2025-09-05 15:35:18] [Rank 0] Total FTA (Weighted): 0.5881
+[2025-09-05 15:35:18] [Rank 0] Group 0 Loss: 3.6221
+[2025-09-05 15:35:18] [Rank 0] Group 1 Loss: 3.1442
+[2025-09-05 15:35:18] [Rank 0] Group 2 Loss: 3.1513
+[2025-09-05 15:35:18] [Rank 0] Group 3 Loss: 3.5188
+[2025-09-05 15:35:18] [Rank 0] Group 4 Loss: 3.5974
+[2025-09-05 15:35:18] [Rank 0] Group 5 Loss: 3.7669
+[2025-09-05 15:35:18] [Rank 0] Group 6 Loss: 3.8452
+[2025-09-05 15:35:18] [Rank 0] Group 7 Loss: 4.0253
+[2025-09-05 15:35:18] [Rank 0] Group 8 Loss: 4.2960
+[2025-09-05 15:35:18] [Rank 0] Group 9 Loss: 4.4013
+[2025-09-05 15:35:18] [Rank 0] Group 10 Loss: 4.5644
+[2025-09-05 15:35:18] [Rank 0] Group 11 Loss: 4.5873
+[2025-09-05 15:35:18] [Rank 0] Group 12 Loss: 4.6076
+[2025-09-05 15:35:18] [Rank 0] Group 13 Loss: 4.6363
+[2025-09-05 15:35:18] [Rank 0] Group 14 Loss: 4.6330
+[2025-09-05 15:35:18] [Rank 0] Group 15 Loss: 4.6554
+[2025-09-05 15:35:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:35:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:35:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:35:18] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:35:18] [Rank 0] Group 4 FTA: 0.9500
+[2025-09-05 15:35:18] [Rank 0] Group 5 FTA: 0.6000
+[2025-09-05 15:35:18] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:35:18] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 15:35:18] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 15:35:18] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 15:35:18] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 15:35:18] [Rank 0] Group 11 FTA: 0.3900
+[2025-09-05 15:35:18] [Rank 0] Group 12 FTA: 0.4600
+[2025-09-05 15:35:18] [Rank 0] Group 13 FTA: 0.3200
+[2025-09-05 15:35:18] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 15:35:18] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 15:35:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:35:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:35:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:35:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:35:19] [Rank 0] step:9501/10000 train_time:406277ms step_avg:42.76ms
+[2025-09-05 15:35:20] [Rank 0] step:9521/10000 train_time:406951ms step_avg:42.74ms
+[2025-09-05 15:35:21] [Rank 0] step:9541/10000 train_time:407690ms step_avg:42.73ms
+[2025-09-05 15:35:21] [Rank 0] step:9561/10000 train_time:408427ms step_avg:42.72ms
+[2025-09-05 15:35:22] [Rank 0] step:9581/10000 train_time:409166ms step_avg:42.71ms
+[2025-09-05 15:35:23] [Rank 0] step:9601/10000 train_time:409904ms step_avg:42.69ms
+[2025-09-05 15:35:24] [Rank 0] step:9621/10000 train_time:410641ms step_avg:42.68ms
+[2025-09-05 15:35:24] [Rank 0] step:9641/10000 train_time:411480ms step_avg:42.68ms
+[2025-09-05 15:35:25] [Rank 0] step:9661/10000 train_time:412497ms step_avg:42.70ms
+[2025-09-05 15:35:26] [Rank 0] step:9681/10000 train_time:413235ms step_avg:42.69ms
+[2025-09-05 15:35:27] [Rank 0] step:9701/10000 train_time:413973ms step_avg:42.67ms
+[2025-09-05 15:35:28] [Rank 0] step:9721/10000 train_time:414711ms step_avg:42.66ms
+[2025-09-05 15:35:28] [Rank 0] step:9741/10000 train_time:415449ms step_avg:42.65ms
+[2025-09-05 15:35:29] [Rank 0] step:9761/10000 train_time:416187ms step_avg:42.64ms
+[2025-09-05 15:35:30] [Rank 0] step:9781/10000 train_time:416925ms step_avg:42.63ms
+[2025-09-05 15:35:31] [Rank 0] step:9801/10000 train_time:417663ms step_avg:42.61ms
+[2025-09-05 15:35:31] [Rank 0] step:9821/10000 train_time:418401ms step_avg:42.60ms
+[2025-09-05 15:35:32] [Rank 0] step:9841/10000 train_time:419138ms step_avg:42.59ms
+[2025-09-05 15:35:33] [Rank 0] step:9861/10000 train_time:419877ms step_avg:42.58ms
+[2025-09-05 15:35:34] [Rank 0] step:9881/10000 train_time:420615ms step_avg:42.57ms
+[2025-09-05 15:35:34] [Rank 0] step:9901/10000 train_time:421353ms step_avg:42.56ms
+[2025-09-05 15:35:35] [Rank 0] step:9921/10000 train_time:422091ms step_avg:42.55ms
+[2025-09-05 15:35:36] [Rank 0] step:9941/10000 train_time:422829ms step_avg:42.53ms
+[2025-09-05 15:35:37] [Rank 0] step:9961/10000 train_time:423566ms step_avg:42.52ms
+[2025-09-05 15:35:37] [Rank 0] step:9981/10000 train_time:424304ms step_avg:42.51ms
+[2025-09-05 15:35:38] [Rank 0] step:10000/10000 train_time:425006ms step_avg:42.50ms
+[2025-09-05 15:35:38] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
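
Each evaluation block ends by rewriting four PNG curves. The plotting code is not included in this hunk, so the following is only an illustrative sketch of the kind of call that could refresh total_loss_curve.png, fed with the detailed-eval losses logged at steps 7500 through 9500:

import matplotlib.pyplot as plt

def save_total_curve(steps, values, ylabel, out_png):
    fig, ax = plt.subplots()
    ax.plot(steps, values, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    fig.savefig(out_png, dpi=150)
    plt.close(fig)

save_total_curve([7500, 8000, 8500, 9000, 9500],
                 [4.1285, 4.0864, 4.0373, 4.1368, 4.0658],
                 "detailed eval loss", "total_loss_curve.png")
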
+[2025-09-05 15:35:38] [Rank 0] PRINT: step:10000/10000 train_loss:1.3941 val_loss:1.3828 train_time:425128ms step_avg:42.51ms
+[2025-09-05 15:35:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:35:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:37:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:37:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:37:00] [Rank 0] Total Loss: 4.0853
+[2025-09-05 15:37:00] [Rank 0] Total FTA (Unweighted): 0.5900
+[2025-09-05 15:37:00] [Rank 0] Total FTA (Weighted): 0.5900
+[2025-09-05 15:37:00] [Rank 0] Group 0 Loss: 3.6357
+[2025-09-05 15:37:00] [Rank 0] Group 1 Loss: 3.1858
+[2025-09-05 15:37:00] [Rank 0] Group 2 Loss: 3.1826
+[2025-09-05 15:37:00] [Rank 0] Group 3 Loss: 3.5145
+[2025-09-05 15:37:00] [Rank 0] Group 4 Loss: 3.6233
+[2025-09-05 15:37:00] [Rank 0] Group 5 Loss: 3.7799
+[2025-09-05 15:37:00] [Rank 0] Group 6 Loss: 3.8823
+[2025-09-05 15:37:00] [Rank 0] Group 7 Loss: 4.0315
+[2025-09-05 15:37:00] [Rank 0] Group 8 Loss: 4.3201
+[2025-09-05 15:37:00] [Rank 0] Group 9 Loss: 4.4291
+[2025-09-05 15:37:00] [Rank 0] Group 10 Loss: 4.5689
+[2025-09-05 15:37:00] [Rank 0] Group 11 Loss: 4.6194
+[2025-09-05 15:37:00] [Rank 0] Group 12 Loss: 4.6072
+[2025-09-05 15:37:00] [Rank 0] Group 13 Loss: 4.6682
+[2025-09-05 15:37:00] [Rank 0] Group 14 Loss: 4.6519
+[2025-09-05 15:37:00] [Rank 0] Group 15 Loss: 4.6650
+[2025-09-05 15:37:00] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:37:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:37:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:37:00] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:37:00] [Rank 0] Group 4 FTA: 0.9500
+[2025-09-05 15:37:00] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 15:37:00] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 15:37:00] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 15:37:00] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 15:37:00] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 15:37:00] [Rank 0] Group 10 FTA: 0.5300
+[2025-09-05 15:37:00] [Rank 0] Group 11 FTA: 0.4000
+[2025-09-05 15:37:00] [Rank 0] Group 12 FTA: 0.4600
+[2025-09-05 15:37:00] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-05 15:37:00] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 15:37:00] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:37:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_loss_curves.png
+[2025-09-05 15:37:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/per_class_acc_curves.png
+[2025-09-05 15:37:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_loss_curve.png
+[2025-09-05 15:37:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/total_acc_curve.png
+[2025-09-05 15:37:01] [Rank 0] step:10001/10000 train_time:425137ms step_avg:42.51ms
+[2025-09-05 15:37:01] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 15:37:01 2025 ---
+[2025-09-05 15:37:01] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/training_log_65ef0aec-0b66-4c38-beea-9ce54e81ad72.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/training_log_65ef0aec-0b66-4c38-beea-9ce54e81ad72.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bf103b6d3ec56b8c585bf097a44d63093ec168af
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/training_log_65ef0aec-0b66-4c38-beea-9ce54e81ad72.txt
b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43/training_log_65ef0aec-0b66-4c38-beea-9ce54e81ad72.txt @@ -0,0 +1,2756 @@ +[2025-09-05 14:15:39] [Rank 0] PRINT: --- Script Start: Fri Sep 5 14:15:39 2025 --- +[2025-09-05 14:15:39] [Rank 0] PRINT: --- Script Start: Fri Sep 5 14:15:39 2025 --- +[2025-09-05 14:15:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 14:15:39] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl') +[2025-09-05 14:15:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 14:15:39] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-09-05 14:15:39] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-05 14:15:39] [Rank 0] PRINT: Using fixed seed: 43 +[2025-09-05 14:15:39] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43 +[2025-09-05 14:15:39] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_43 +[2025-09-05 14:15:39] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own 
simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle revisits the shards, enabling multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                    "0: Muon(All Hidden Attn+MLP - original); "
+                    "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                    "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                    "3: Muon(All Attn)/Adam(MLP); "
+                    "4: Muon(MLP)/Adam(All Attn); "
+                    "5: All Adam (No Muon, all applicable matrices to Adam); "
+                    "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                    "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                    "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                    "9: pure SGD+momentum on ALL parameters (uses --sgd_lr); "
+                    "10: Muon(O Attn, MLP)/Adam(QK+V Attn); "
+                    "13: Muon(W_O, W_2 MLP)/Adam(QK+V Attn, W_1 MLP); "
+                    "14: Muon(W_O)/Adam(QK+V Attn, MLP); "
+                    "15: Muon(W_V)/Adam(QK+O Attn, MLP); "
+                    "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import the correct GPT model based on --unet and --model_parameterization ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 491520
+    train_seq_len = 3*1024
+    val_seq_len = 4*4*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
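+
+# Note: with the settings above, one validation pass runs
+#   val_num_steps = val_tokens // (world_size * val_seq_len)
+#                 = 491520 // (world_size * 16384)
+# sequence-steps, i.e. 30 on a single GPU, which matches the val_num_steps
+# computation in the training loop further below.
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank 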
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+# Base directory under which each run's log folder is created
+base_log_dir = Path(exp_args.base_dir)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append to the run log exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
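+
+# [Sketch, added for illustration; not part of the original run] Because print0
+# writes fixed-format lines, a finished log can be summarised with one regex;
+# the helper name is hypothetical.
+def parse_val_losses_sketch(log_path):
+    """Return {step: val_loss} scraped from a training log file."""
+    pat = re.compile(r"step:(\d+)/\d+ .*?val_loss:([0-9.]+)")
+    out = {}
+    with open(log_path) as f:
+        for line in f:
+            m = pat.search(line)
+            if m:
+                out[int(m.group(1))] = float(m.group(2))
+    return out
+
+# ... 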
(other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
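+    # [Worked example, added for illustration] For m = 3,
+    # generate_powerlaw_selection_counts gives:
+    #   group 0: 1 class  with 2**3 = 8 samples
+    #   group 1: 1 class  with 2**2 = 4 samples
+    #   group 2: 2 classes with 2**1 = 2 samples each
+    #   group 3: 4 classes with 2**0 = 1 sample each
+    # so class_ids 0..7 map to groups [0, 1, 2, 2, 3, 3, 3, 3]; the same
+    # mapping (with m = 15 here) drives class_to_group_map in the loop below.
+
+    # 3. 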
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
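+    # [Sketch] A sanity check one could insert here (left commented out; it was
+    # not part of this run): every matrix should be owned by exactly one optimizer.
+    # adam_ids = {id(p) for g in optimizer1.param_groups for p in g["params"]}
+    # muon_ids = {id(p) for g in optimizer2.param_groups for p in g["params"]} if optimizer2 else set()
+    # assert not (adam_ids & muon_ids), "a parameter is assigned to both optimizers"
+
+    print0(f"PRINT: Optimizers configured. 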
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # Note: the qkvo branch sets a local muon_lr; this branch did not, so use exp_args.muon_lr directly
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the original 'model'; the training loop below always calls 'model_compiled'
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
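+    # Gradients are averaged across ranks manually (the model is never wrapped
+    # in DistributedDataParallel, so backward() does not all-reduce on its own)
+    for param in 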
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_loss:{train_loss_per_token:.4f} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
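+
+# [Sketch] A launch line consistent with the CLI args logged above (script name
+# and launcher flags are assumed, not taken from the run):
+#   torchrun --standalone --nproc_per_node=1 train_gpt.py \
+#       --seed 43 --optimizer_mode 9 --model_parameterization gated \
+#       --sgd_lr 0.5 --m_val 15 --per_group_k 100 \
+#       --base_dir logs_qa_sgd_gated/lr_search_long \
+#       --qa_jsonl_path /home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl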
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once (an earlier revision
+        # repeated this write block, which doubled every line in the log files)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message 
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
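Two schedule details in the loop above are easy to miss: every optimizer group's lr is recomputed each step as the product of its stored initial_lr and the global multiplier get_lr(step) (get_lr is defined elsewhere in the full script and treated as given here), and Muon's momentum is warmed linearly from 0.85 to 0.95 over the first 300 steps. A minimal sketch of the momentum ramp:

def muon_momentum(step: int, warmup_steps: int = 300) -> float:
    # Linear ramp: 0.85 at step 0, 0.95 from step `warmup_steps` onward.
    frac = min(step / warmup_steps, 1)
    return (1 - frac) * 0.85 + frac * 0.95

assert muon_momentum(0) == 0.85
assert abs(muon_momentum(150) - 0.90) < 1e-9
assert muon_momentum(10_000) == 0.95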
+[2025-09-05 14:15:39] [Rank 0] PRINT: Constructing model...
+[2025-09-05 14:15:41] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 14:15:41] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 14:15:41] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 14:15:41] [Rank 0] PRINT: Model test failed:
+[2025-09-05 14:15:41] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 14:15:41] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 14:15:41] [Rank 0] PRINT: Model test still fails:
+[2025-09-05 14:15:41] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 14:15:41] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 14:15:41] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 14:15:43] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 14:15:43] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 14:15:49] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 14:15:49] [Rank 0] PRINT: Starting warmup...
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..4009f8f36bbf2c88bb3477e611a1e9c0ea5361da
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/config.json
@@ -0,0 +1,29 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 44,
+        "optimizer_mode": 9,
+        "model_parameterization": "gated",
+        "per_group_k": 100,
+        "muon_lr": 0.01,
+        "adam_lr": 0.001,
+        "base_dir": "logs_qa_sgd_gated/lr_search_long",
+        "sgd_lr": 0.5,
+        "m_val": 15,
+        "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl"
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin",
+        "val_tokens": 491520,
+        "train_seq_len": 3072,
+        "val_seq_len": 16384,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "7ed8369c-4c34-42b1-9f02-3f1d2441c7b4",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/fixed_eval_indices.json
new file mode 100644
index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/fixed_eval_indices.json
@@ -0,0 +1 @@
+{"1": [1238956, 182074, 1437575, 1061037, 383150, 
1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 
969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 
1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 
1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..ce38b3d512bb4e1fdb038ac1551c68c47417a59d --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d69c0f677dd0359cd42cfce156281a81d0d3719a5bb94517461166c3ea0e28db +size 404722 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png new file mode 
100644
index 0000000000000000000000000000000000000000..73e8eb679f82b81687292e1a93d63507594f509d
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dca77e94ae1984d6080319fbdd3cecab51e050ec39c973b9f4099f76b465d5bf
+size 483035
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..8f095794f86e41db203e7e9ca5cf98c918893dbe
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53969c111cb6cb736a062f5161f20c47a5f90a704b8df963fd8b616c40f6b224
+size 92771
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..169a8a2368efb2057ab746abdf0233e4b6ce4bcd
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b112f0f10b03eef89c581b663c31215ae5e516f51509a0b4b9975d531b863ef
+size 114669
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/training_log_33324169-e70a-427c-87ef-b06c7ab52579.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/training_log_33324169-e70a-427c-87ef-b06c7ab52579.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3e26e79f4dc67b9b33e1113e7e6a14d259262466
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/training_log_33324169-e70a-427c-87ef-b06c7ab52579.txt
@@ -0,0 +1,2756 @@
+[2025-09-05 14:16:14] [Rank 0] PRINT: --- Script Start: Fri Sep 5 14:16:14 2025 ---
+[2025-09-05 14:16:14] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 14:16:14] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 14:16:14] [Rank 0] PRINT: Using fixed seed: 44
+[2025-09-05 14:16:14] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44
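The fixed_eval_indices.json added above (byte-identical across runs, since the seeded draw is deterministic) apparently maps each group id, stored as a string key, to the line numbers of that group's held-out QA examples in qa_tail_m15.jsonl; per_group_k=100 indices per group means every evaluation scores the same examples, which is what keeps the per-class curves comparable across steps and seeds. A quick sanity check along these lines (path illustrative; the length assert assumes every group had at least per_group_k candidates):

import json

with open("fixed_eval_indices.json") as f:
    fixed_idx = json.load(f)

# m_val=15 gives groups 0..15; each group lists per_group_k jsonl line numbers
assert set(fixed_idx.keys()) == {str(g) for g in range(16)}
assert all(len(idxs) == 100 for idxs in fixed_idx.values())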
+[2025-09-05 14:16:14] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle over the shards so training can run for multiple epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
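The .bin layout implied by _load_data_shard: a header of 256 int32 slots, of which the first three carry a magic number (20240520), a version (1), and the token count, followed by the tokens themselves as uint16 ids starting at byte offset 256 * 4. A sketch that checks a shard's header without loading its tokens:

import numpy as np

def peek_shard(path: str) -> int:
    # Only the first 3 of the 256 reserved int32 header slots carry metadata.
    magic, version, num_tokens = np.fromfile(path, dtype=np.int32, count=3)
    assert magic == 20240520 and version == 1
    return int(num_tokens)  # token data begins at byte offset 256 * 4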
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: pure SGD+Momentum on all parameters (uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QKV Attn); "
+                         "13: Muon(O Attn, W_2 MLP)/Adam(QKV Attn, W_1 MLP); "
+                         "14: Muon(O Attn)/Adam(QKV Attn, MLP); "
+                         "15: Muon(V Attn)/Adam(QK+O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence 
length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, 
console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once (an earlier revision
+        # wrote it twice, which is why the logs above show duplicated lines)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
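Concretely, group g contributes 2^(g-1) classes (a single class for g=0), each with 2^(m-g) samples, so for m=15 every group g >= 1 carries the same total mass of 2^14 samples spread over ever more, ever rarer classes. A quick check of that invariant against the function above:

counts, groups = generate_powerlaw_selection_counts(15)
per_group_total = {}
for class_id, group_id in zip(counts, groups):
    per_group_total[group_id] = per_group_total.get(group_id, 0) + counts[class_id]

assert per_group_total[0] == 2 ** 15                              # one head class
assert all(per_group_total[g] == 2 ** 14 for g in range(1, 16))   # equal-mass tail groups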
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if 0 < prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
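+    # FTA above is first-token accuracy: the argmax over the logits at the
+    # prompt's final position must equal the first token of " <answer>".
+    # Per-class loss instead averages the full next-token cross-entropy over
+    # the padded sequence, with ignore_index=-100 masking the padding.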
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+
+    # Two methods for calculating total accuracy
+    total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples
+    total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy
+        'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups
+        'total_acc': total_acc_unweighted # Primarily use simple average method
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
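+    # The remaining modes (14-16) each isolate a single projection family
+    # under Muon (W_O only, W_V only, or QKV) and leave every other matrix,
+    # plus embeddings, head, and scalars, to Adam.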
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
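Every non-SGD mode above partitions the hidden matrices between Muon and Adam, and a mis-edited mode can silently double-assign or drop a matrix. A minimal sanity-check sketch (hypothetical; check_param_partition and its call site are not part of the logged script) that could run right after the dispatch:

def check_param_partition(muon_list, adam_list, all_matrices):
    # Compare by id(): the same nn.Parameter object must not appear in both lists
    muon_ids = {id(p) for p in muon_list}
    adam_ids = {id(p) for p in adam_list}
    assert not (muon_ids & adam_ids), "a matrix is assigned to BOTH Muon and Adam"
    covered = muon_ids | adam_ids
    missing = [tuple(p.shape) for p in all_matrices if id(p) not in covered]
    assert not missing, f"matrices handled by neither optimizer: {missing}"

# hypothetical call site, after the mode dispatch:
# check_param_partition(muon_params_target_list, adam_matrix_target_list,
#                       all_attn_matrices + all_mlp_matrices)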
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
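Once the groups are built, their sizes can be checked against the block count: in the gated parameterization each block contributes q_w, k_w, v_w and the attention c_proj, plus c_fc, c_up and the MLP c_proj. A quick consistency sketch (hypothetical, assuming every block has both an attention and an MLP module):

num_blocks = len(model.blocks)  # 10 layers in the gated config used here
expected_sizes = {
    "attn_qk_group": 2 * num_blocks,  # q_w + k_w
    "attn_vo_group": 2 * num_blocks,  # v_w + attention c_proj
    "mlp_w1_group": 2 * num_blocks,   # c_fc + c_up
    "mlp_w2_group": 1 * num_blocks,   # MLP c_proj
}
actual_sizes = {
    "attn_qk_group": len(attn_qk_group),
    "attn_vo_group": len(attn_vo_group),
    "mlp_w1_group": len(mlp_w1_group),
    "mlp_w2_group": len(mlp_w2_group),
}
for name, want in expected_sizes.items():
    if actual_sizes[name] != want:
        print0(f"PRINT: Warning - {name} has {actual_sizes[name]} matrices, expected {want}.", console=True)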
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
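The qkvo and gated branches repeat an identical mode ladder. A table-driven sketch (hypothetical; MODE_TABLE is not part of the script, which uses the if/elif chains above) shows how the split could be declared once the groups are built; mode 9 would still need its own SGD path:

# mode -> (matrices for Muon, matrices for Adam); a subset of the modes above
MODE_TABLE = {
    0: (all_attn_matrices + all_mlp_matrices, []),
    1: (attn_qk_group, attn_vo_group + all_mlp_matrices),
    2: (attn_vo_group, attn_qk_group + all_mlp_matrices),
    5: ([], all_attn_matrices + all_mlp_matrices),
    16: (attn_v_params + attn_qk_group, attn_o_params + all_mlp_matrices),
}
muon_params_target_list, adam_matrix_target_list = MODE_TABLE[current_optimizer_mode]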
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
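With num_iterations=10000 and cooldown_frac=0.8, get_lr returns a multiplier of 1.0 for the first 2000 steps and then decays linearly to 0.1 at the final step. A standalone restatement of the same formula with a few spot checks (hypothetical helper, mirroring get_lr above):

def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped progress, as above
    if x < 1 - cooldown_frac:
        return 1.0                                 # stable phase
    w = (1 - x) / cooldown_frac                    # 1 -> 0 across the cooldown
    return w * 1.0 + (1 - w) * 0.1                 # linear decay toward 0.1

assert abs(lr_multiplier(0) - 1.0) < 1e-9      # stable phase
assert abs(lr_multiplier(2000) - 1.0) < 1e-9   # cooldown starts here
assert abs(lr_multiplier(6000) - 0.55) < 1e-9  # halfway through the cooldown
assert abs(lr_multiplier(10000) - 0.1) < 1e-9  # end of training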
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 14:16:14] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
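The helper above only acts on rank 0: console output is gated on console=True or the "PRINT:" prefix (which is stripped before printing), while the logfile always receives the full timestamped line. Illustrative usage (the timestamp shown is an example only):

print0("PRINT: Starting warmup...", console=True)
# stdout (rank 0): " Starting warmup..."  (prefix stripped)
# logfile:         "[2025-09-05 14:16:14] [Rank 0] PRINT: Starting warmup..."

print0("grad-norm diagnostics for step 42", console=False)
# nothing on stdout; the logfile still gets the timestamped line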
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_2 MLP, W_O Attn
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # could add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_2 MLP, W_O Attn
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # could add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # This branch defines no local muon_lr, so read it from the CLI args directly
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 14:16:14] [Rank 0] PRINT: Constructing model... +[2025-09-05 14:16:14] [Rank 0] PRINT: Constructing model... +[2025-09-05 14:16:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-05 14:16:16] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-05 14:16:16] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-05 14:16:16] [Rank 0] PRINT: Model constructed and broadcasted. 
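+# Worked example of the stable-then-decay LR schedule defined above (a sketch,
+# assuming the cooldown_frac = 0.8 used by these runs):
+#   x = step / num_iterations
+#   x < 0.2  -> multiplier 1.0 (stable phase)
+#   x >= 0.2 -> w = (1 - x) / 0.8, multiplier = w * 1.0 + (1 - w) * 0.1
+# e.g. x = 0.6 gives w = 0.5 and a multiplier of 0.55, decaying linearly to 0.1 at x = 1.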
+[2025-09-05 14:16:16] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 14:16:16] [Rank 0] PRINT: Model test failed: 
+[2025-09-05 14:16:16] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 14:16:16] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 14:16:16] [Rank 0] PRINT: Model test still fails: 
+[2025-09-05 14:16:16] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 14:16:16] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 14:16:16] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 14:16:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 14:16:18] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 14:16:24] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 14:16:24] [Rank 0] PRINT: Starting warmup...
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/training_log_7ed8369c-4c34-42b1-9f02-3f1d2441c7b4.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/training_log_7ed8369c-4c34-42b1-9f02-3f1d2441c7b4.txt
new file mode 100644
index 0000000000000000000000000000000000000000..15b5142fc108b177587ca761f49c543673a52b89
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/training_log_7ed8369c-4c34-42b1-9f02-3f1d2441c7b4.txt
@@ -0,0 +1,5614 @@
+[2025-09-05 15:37:27] [Rank 0] PRINT: --- Script Start: Fri Sep 5 15:37:27 2025 ---
+[2025-09-05 15:37:27] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=44, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 15:37:27] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 15:37:27] [Rank 0] PRINT: Using fixed seed: 44
+[2025-09-05 15:37:27] [Rank 0] PRINT: Run 
directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44 +[2025-09-05 15:37:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
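+        # Sharding note: each rank takes its own local_batch_size slice of the
+        # current window, and every rank then advances pos by the full (global)
+        # batch_size, so ranks read disjoint, contiguous token ranges in lockstep.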
+ pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence 
length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, 
console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. 
Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
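+
+    # Optional sanity-check sketch (illustrative, safe to remove): the dispatch
+    # above should never route the same matrix to both optimizers. Assumes both
+    # lists hold plain nn.Parameter objects, which is how they are built above;
+    # the helper names are hypothetical additions, not part of the original run.
+    _muon_ids = {id(p) for p in muon_params_target_list}
+    _adam_ids = {id(p) for p in adam_matrix_target_list}
+    assert not (_muon_ids & _adam_ids), "a parameter is assigned to both Muon and Adam"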
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
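+# The gated branch that follows mirrors the qkvo routing above; its only
+# structural difference is the gated MLP, which has an extra c_up input
+# projection, so the W_1 group there covers both c_fc and c_up.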
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr # Muon LR for this branch (mirrors the qkvo branch above)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
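+
+    # Grouping note (an illustrative no-op mapping, not used anywhere): in this
+    # gated parameterization the MLP has two input projections, so the "W_1"
+    # group above covers both, while "W_2" is the single output projection.
+    _GATED_MLP_GROUPS = {
+        "W_1": "mlp.c_fc.weight and mlp.c_up.weight (per block)",
+        "W_2": "mlp.c_proj.weight (per block)",
+    }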
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
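+# Reference summary of the optimizer_mode routing implemented above (matrices
+# only; embeddings, lm_head and scalars always go to Adam, except mode 9):
+#   0: Muon(QK,V,O,MLP)        9: SGD+momentum on everything
+#   1: Muon(QK)               10: Muon(O,MLP)
+#   2: Muon(V,O)              13: Muon(O,W_2)
+#   3: Muon(QK,V,O)           14: Muon(O)
+#   4: Muon(MLP)              15: Muon(V)
+#   5: Adam on everything     16: Muon(QK,V)
+#   6: Muon(W_2)
+#   7: Muon(V,O,MLP)          (whatever Muon does not take, Adam takes)
+#   8: Muon(V,O,W_2)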
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
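+
+# A minimal sketch of what the schedule above produces, assuming this run's
+# num_iterations=10000 and cooldown_frac=0.8. Defined for illustration only;
+# it is never called during training.
+def _demo_lr_schedule():
+    # constant 1.0 for the first 20% of steps, then linear decay toward 0.1
+    for s in (0, 2000, 4000, 6000, 8000, 10000):
+        print(s, round(get_lr(s), 3))  # -> 1.0, 1.0, 0.775, 0.55, 0.325, 0.1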
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 15:37:27] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
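+# Usage note for print0 above: it is a rank-0-only logger. Messages starting
+# with "PRINT:" are echoed to stdout with the prefix stripped (console=True
+# forces echoing for other messages too), and every message is appended to the
+# run's logfile with a timestamp and rank tag, e.g.
+#   print0("PRINT: hello", console=True)
+#   -> stdout:  "hello"
+#   -> logfile: "[2025-09-05 15:37:27] [Rank 0] PRINT: hello"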
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on V, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_2, W_O; Adam on the rest
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
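+# Mode -> parameter assignment summary (derived from the branches above and below;
+# embeds, lm_head and scalar params always go to Adam, except mode 9 which is pure SGD):
+#   0: Muon on all attn + all MLP          8: Muon on VO + W_2 (MLP down-proj)
+#   1: Muon on QK                          9: SGD+momentum on everything
+#   2: Muon on VO                         10: Muon on W_O + MLP
+#   3: Muon on all attn (QKVO)            13: Muon on W_O + W_2
+#   4: Muon on all MLP                    14: Muon on W_O only
+#   5: Adam on everything                 15: Muon on W_V only
+#   6: Muon on W_2 (MLP down-proj)        16: Muon on QKV
+#   7: Muon on VO + MLP
+# Every matrix not given to Muon in a mode is handled by Adam at adam_lr.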
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments (the gated MLP has an extra
+    # c_up projection, grouped with c_fc as part of W_1)
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr # used when constructing Muon below; without this the "gated" branch hits a NameError
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on V, QK Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_2, W_O; Adam on the rest
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
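+# (Worked example of the two schedules above, using this run's settings of
+#  num_iterations=10000 and cooldown_frac=0.8: get_lr is 1.0 for steps 0..1999
+#  and then decays linearly, e.g. get_lr(2000)=1.0, get_lr(6000)=0.55,
+#  get_lr(10000)=0.1; the attention window grows from 128 tokens (1 block) at
+#  step 0 to 1792 tokens (14 blocks) at the final step.)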
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100 # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building the fixed eval set; ensure a parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Build buckets of line indices by group_id, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list) # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:] # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set.
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 15:37:27] [Rank 0] PRINT: Constructing model...
+[2025-09-05 15:37:28] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 15:37:28] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 15:37:28] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 15:37:32] [Rank 0] PRINT: Model test - Result type:
+[2025-09-05 15:37:32] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 15:37:32] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 15:37:32] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 15:37:32] [Rank 0] PRINT: Model returns:
+[2025-09-05 15:37:32] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 15:37:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 15:37:32] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 15:37:33] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 15:37:33] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 15:37:37] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 15:37:37] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 15:38:35] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 15:38:35] [Rank 0] PRINT: Starting training...
+[2025-09-05 15:38:41] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/fixed_eval_indices.json
+[2025-09-05 15:38:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:38:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 15:39:20] [Rank 0] step:21/10000 train_time:34187ms step_avg:1627.96ms
+[2025-09-05 15:39:20] [Rank 0] step:41/10000 train_time:34917ms step_avg:851.64ms
+[2025-09-05 15:39:21] [Rank 0] step:61/10000 train_time:35645ms step_avg:584.35ms
+[2025-09-05 15:39:22] [Rank 0] step:81/10000 train_time:36375ms step_avg:449.07ms
+[2025-09-05 15:39:22] [Rank 0] step:101/10000 train_time:37103ms step_avg:367.35ms
+[2025-09-05 15:39:23] [Rank 0] step:121/10000 train_time:37831ms step_avg:312.65ms
+[2025-09-05 15:39:24] [Rank 0] step:141/10000 train_time:38559ms step_avg:273.47ms
+[2025-09-05 15:39:25] [Rank 0] step:161/10000 train_time:39287ms step_avg:244.02ms
+[2025-09-05 15:39:25] [Rank 0] step:181/10000 train_time:40015ms step_avg:221.08ms
+[2025-09-05 15:39:26] [Rank 0] step:201/10000 train_time:40743ms step_avg:202.70ms
+[2025-09-05 15:39:27] [Rank 0] step:221/10000 train_time:41472ms step_avg:187.66ms
+[2025-09-05 15:39:28] [Rank 0] step:241/10000 train_time:42201ms step_avg:175.11ms
+[2025-09-05 15:39:28] [Rank 0] step:261/10000 train_time:42929ms step_avg:164.48ms
+[2025-09-05 15:39:29] [Rank 0] step:281/10000 train_time:43657ms step_avg:155.36ms
+[2025-09-05 15:39:30] [Rank 0] step:301/10000 train_time:44385ms step_avg:147.46ms
+[2025-09-05 15:39:30] [Rank 0] step:321/10000 train_time:45113ms step_avg:140.54ms
+[2025-09-05 15:39:31] [Rank 0] step:341/10000 train_time:45841ms step_avg:134.43ms
+[2025-09-05 15:39:32] [Rank 0] step:361/10000 train_time:46570ms step_avg:129.00ms
+[2025-09-05 15:39:33] [Rank 0] step:381/10000 train_time:47298ms step_avg:124.14ms
+[2025-09-05 15:39:33] [Rank 0] step:401/10000 train_time:48026ms step_avg:119.77ms
+[2025-09-05 15:39:34] [Rank 0] step:421/10000 train_time:48754ms step_avg:115.81ms
+[2025-09-05 15:39:35] [Rank 0] step:441/10000 train_time:49483ms step_avg:112.21ms
+[2025-09-05 15:39:36] [Rank 0] step:461/10000 train_time:50212ms step_avg:108.92ms
+[2025-09-05 15:39:36] [Rank 0] step:481/10000 train_time:50940ms step_avg:105.90ms
+[2025-09-05 15:39:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:39:37] [Rank 0] PRINT: step:500/10000 train_loss:3.5945 val_loss:2.3662 train_time:51750ms step_avg:103.50ms
+[2025-09-05 15:39:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:39:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:40:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:40:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:40:59] [Rank 0] Total Loss: 4.6747
+[2025-09-05 15:40:59] [Rank 0] Total FTA (Unweighted): 0.2431
+[2025-09-05 15:40:59] [Rank 0] Total FTA (Weighted): 0.2431
+[2025-09-05 15:40:59] [Rank 0] Group 0 Loss: 3.3465
+[2025-09-05 15:40:59] [Rank 0] Group 1 Loss: 3.0502
+[2025-09-05 15:40:59] [Rank 0] Group 2 Loss: 3.0640
+[2025-09-05 15:40:59] [Rank 0] Group 3 Loss: 3.5367
+[2025-09-05 15:40:59] [Rank 0] Group 4 Loss: 3.9495
+[2025-09-05 15:40:59] [Rank 0] Group 5 Loss: 4.4172
+[2025-09-05 15:40:59] [Rank 0] Group 6 Loss: 4.7687
+[2025-09-05 15:40:59] [Rank 0] Group 7 Loss: 4.9563
+[2025-09-05 15:40:59] [Rank 0] Group 8 Loss: 5.2996
+[2025-09-05 15:40:59] [Rank 0] Group 9 Loss: 5.4370
+[2025-09-05 15:40:59] [Rank 0] Group 10 Loss: 5.5008
+[2025-09-05 15:40:59] [Rank 0] Group 11 Loss: 5.5494
+[2025-09-05 15:40:59] [Rank 0] Group 12 Loss: 5.4552
+[2025-09-05 15:40:59] [Rank 0] Group 13 Loss: 5.4839
+[2025-09-05 15:40:59] [Rank 0] Group 14 Loss: 5.5270
+[2025-09-05 15:40:59] [Rank 0] Group 15 Loss: 5.4540
+[2025-09-05 15:40:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:40:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:40:59] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 15:40:59] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 15:40:59] [Rank 0] Group 4 FTA: 0.2000
+[2025-09-05 15:40:59] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-05 15:40:59] [Rank 0] Group 6 FTA: 0.1200
+[2025-09-05 15:40:59] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-05 15:40:59] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 15:40:59] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-05 15:40:59] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 15:40:59] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 15:40:59] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 15:40:59] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 15:40:59] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 15:40:59] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 15:41:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 15:41:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 15:41:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 15:41:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 15:41:01] [Rank 0] step:501/10000 train_time:51759ms step_avg:103.31ms
+[2025-09-05 15:41:01] [Rank 0] step:521/10000 train_time:52422ms step_avg:100.62ms
+[2025-09-05 15:41:02] [Rank 0] step:541/10000 train_time:53153ms step_avg:98.25ms
+[2025-09-05 15:41:03] [Rank 0] step:561/10000 train_time:53882ms step_avg:96.05ms
+[2025-09-05 15:41:04] [Rank 0] step:581/10000 train_time:54610ms step_avg:93.99ms
+[2025-09-05 15:41:04] [Rank 0] step:601/10000 train_time:55339ms step_avg:92.08ms
+[2025-09-05 15:41:05] [Rank 0] step:621/10000 train_time:56068ms step_avg:90.29ms
+[2025-09-05 15:41:06] [Rank 0] step:641/10000 train_time:56797ms step_avg:88.61ms
+[2025-09-05 15:41:07] [Rank 0] step:661/10000 train_time:57526ms step_avg:87.03ms
+[2025-09-05 15:41:07] [Rank 0] step:681/10000 train_time:58254ms step_avg:85.54ms
+[2025-09-05 15:41:08] [Rank 0] step:701/10000 train_time:58983ms step_avg:84.14ms
+[2025-09-05 15:41:09] [Rank 0] step:721/10000 train_time:59712ms step_avg:82.82ms
+[2025-09-05 15:41:09] [Rank 0] step:741/10000 train_time:60441ms step_avg:81.57ms
+[2025-09-05 15:41:10] [Rank 0] step:761/10000 train_time:61174ms step_avg:80.39ms
+[2025-09-05 15:41:11] [Rank 0] step:781/10000 train_time:61907ms step_avg:79.27ms
+[2025-09-05 15:41:12] [Rank 0] step:801/10000 train_time:62642ms step_avg:78.20ms
+[2025-09-05 15:41:13] [Rank 0] step:821/10000 train_time:64003ms step_avg:77.96ms
+[2025-09-05 15:41:14] [Rank 0] step:841/10000 train_time:64736ms step_avg:76.98ms
+[2025-09-05 15:41:14] [Rank 0] step:861/10000 train_time:65470ms step_avg:76.04ms
+[2025-09-05 15:41:15] [Rank 0] step:881/10000 train_time:66203ms step_avg:75.15ms
+[2025-09-05 15:41:16] [Rank 0] step:901/10000 train_time:66936ms step_avg:74.29ms
+[2025-09-05 15:41:17] [Rank 0] step:921/10000 train_time:67670ms step_avg:73.47ms
+[2025-09-05 15:41:17] [Rank 0] step:941/10000 train_time:68404ms step_avg:72.69ms
+[2025-09-05 15:41:17] [Rank 0] step:941/10000 train_time:68404ms step_avg:72.69ms
+[2025-09-05 15:41:18] [Rank 0] step:961/10000 train_time:69137ms step_avg:71.94ms
+[2025-09-05 15:41:19] [Rank 0] step:981/10000 train_time:69871ms step_avg:71.22ms
+[2025-09-05 15:41:20] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:41:20] [Rank 0] PRINT: step:1000/10000 train_loss:2.0927 val_loss:1.8751 train_time:70685ms step_avg:70.69ms
+[2025-09-05 15:41:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:41:20] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:42:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:42:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:42:41] [Rank 0] Total Loss: 4.4039
+[2025-09-05 15:42:41] [Rank 0] Total FTA (Unweighted): 0.3381
+[2025-09-05 15:42:41] [Rank 0] Total FTA (Weighted): 0.3381
+[2025-09-05 15:42:41] [Rank 0] Group 0 Loss: 3.1975
+[2025-09-05 15:42:41] [Rank 0] Group 1 Loss: 3.0621
+[2025-09-05 15:42:41] [Rank 0] Group 2 Loss: 3.1191
+[2025-09-05 15:42:41] [Rank 0] Group 3 Loss: 3.4737
+[2025-09-05 15:42:41] [Rank 0] Group 4 Loss: 3.7841
+[2025-09-05 15:42:41] [Rank 0] Group 5 Loss: 4.0070
+[2025-09-05 15:42:41] [Rank 0] Group 6 Loss: 4.3258
+[2025-09-05 15:42:41] [Rank 0] Group 7 Loss: 4.5913
+[2025-09-05 15:42:41] [Rank 0] Group 8 Loss: 4.8688
+[2025-09-05 15:42:41] [Rank 0] Group 9 Loss: 5.0065
+[2025-09-05 15:42:41] [Rank 0] Group 10 Loss: 5.1469
+[2025-09-05 15:42:41] [Rank 0] Group 11 Loss: 5.1793
+[2025-09-05 15:42:41] [Rank 0] Group 12 Loss: 5.1580
+[2025-09-05 15:42:41] [Rank 0] Group 13 Loss: 5.1893
+[2025-09-05 15:42:41] [Rank 0] Group 14 Loss: 5.1897
+[2025-09-05 15:42:41] [Rank 0] Group 15 Loss: 5.1633
+[2025-09-05 15:42:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:42:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:42:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:42:41] [Rank 0] Group 3 FTA: 0.3600
+[2025-09-05 15:42:41] [Rank 0] Group 4 FTA: 0.3200
+[2025-09-05 15:42:41] [Rank 0] Group 5 FTA: 0.2800
+[2025-09-05 15:42:41] [Rank 0] Group 6 FTA: 0.3000
+[2025-09-05 15:42:41] [Rank 0] Group 7 FTA: 0.1300
+[2025-09-05 15:42:41] [Rank 0] Group 8 FTA: 0.2200
+[2025-09-05 15:42:41] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 15:42:41] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 15:42:41] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 15:42:41] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 15:42:41] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 15:42:41] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 15:42:41] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:42:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 15:42:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 15:42:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 15:42:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 15:42:43] [Rank 0] step:1001/10000 train_time:70695ms step_avg:70.62ms
+[2025-09-05 15:42:44] [Rank 0] step:1021/10000 train_time:71364ms step_avg:69.90ms
+[2025-09-05 15:42:45] [Rank 0] step:1041/10000 train_time:72097ms step_avg:69.26ms
+[2025-09-05 15:42:45] [Rank 0] step:1061/10000 train_time:72830ms step_avg:68.64ms
+[2025-09-05 15:42:46] [Rank 0] step:1081/10000 train_time:73563ms step_avg:68.05ms
+[2025-09-05 15:42:47] [Rank 0] step:1101/10000 train_time:74298ms step_avg:67.48ms
+[2025-09-05 15:42:48] [Rank 0] step:1121/10000 train_time:75032ms step_avg:66.93ms
+[2025-09-05 15:42:48] [Rank 0] step:1141/10000 train_time:75766ms step_avg:66.40ms
+[2025-09-05 15:42:49] [Rank 0] step:1161/10000 train_time:76500ms step_avg:65.89ms
+[2025-09-05 15:42:50] [Rank 0] step:1181/10000 train_time:77235ms step_avg:65.40ms
+[2025-09-05 15:42:51] [Rank 0] step:1201/10000 train_time:77969ms step_avg:64.92ms
+[2025-09-05 15:42:51] [Rank 0] step:1221/10000 train_time:78704ms step_avg:64.46ms
+[2025-09-05 15:42:52] [Rank 0] step:1241/10000 train_time:79438ms step_avg:64.01ms
+[2025-09-05 15:42:53] [Rank 0] step:1261/10000 train_time:80172ms step_avg:63.58ms
+[2025-09-05 15:42:53] [Rank 0] step:1281/10000 train_time:80907ms step_avg:63.16ms
+[2025-09-05 15:42:54] [Rank 0] step:1301/10000 train_time:81642ms step_avg:62.75ms
+[2025-09-05 15:42:55] [Rank 0] step:1321/10000 train_time:82376ms step_avg:62.36ms
+[2025-09-05 15:42:56] [Rank 0] step:1341/10000 train_time:83110ms step_avg:61.98ms
+[2025-09-05 15:42:56] [Rank 0] step:1361/10000 train_time:83843ms step_avg:61.60ms
+[2025-09-05 15:42:57] [Rank 0] step:1381/10000 train_time:84577ms step_avg:61.24ms
+[2025-09-05 15:42:58] [Rank 0] step:1401/10000 train_time:85311ms step_avg:60.89ms
+[2025-09-05 15:42:59] [Rank 0] step:1421/10000 train_time:86045ms step_avg:60.55ms
+[2025-09-05 15:42:59] [Rank 0] step:1441/10000 train_time:86780ms step_avg:60.22ms
+[2025-09-05 15:43:00] [Rank 0] step:1461/10000 train_time:87514ms step_avg:59.90ms
+[2025-09-05 15:43:01] [Rank 0] step:1481/10000 train_time:88249ms step_avg:59.59ms
+[2025-09-05 15:43:02] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:43:02] [Rank 0] PRINT: step:1500/10000 train_loss:1.7687 val_loss:1.6758 train_time:89064ms step_avg:59.38ms
+[2025-09-05 15:43:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:43:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:44:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:44:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:44:23] [Rank 0] Total Loss: 4.4016
+[2025-09-05 15:44:23] [Rank 0] Total FTA (Unweighted): 0.3937
+[2025-09-05 15:44:23] [Rank 0] Total FTA (Weighted): 0.3937
+[2025-09-05 15:44:23] [Rank 0] Group 0 Loss: 3.3881
+[2025-09-05 15:44:23] [Rank 0] Group 1 Loss: 3.2845
+[2025-09-05 15:44:23] [Rank 0] Group 2 Loss: 3.2710
+[2025-09-05 15:44:23] [Rank 0] Group 3 Loss: 3.6060
+[2025-09-05 15:44:23] [Rank 0] Group 4 Loss: 3.7966
+[2025-09-05 15:44:23] [Rank 0] Group 5 Loss: 4.0354
+[2025-09-05 15:44:23] [Rank 0] Group 6 Loss: 4.2166
+[2025-09-05 15:44:23] [Rank 0] Group 7 Loss: 4.4702
+[2025-09-05 15:44:23] [Rank 0] Group 8 Loss: 4.7634
+[2025-09-05 15:44:23] [Rank 0] Group 9 Loss: 4.9227
+[2025-09-05 15:44:23] [Rank 0] Group 10 Loss: 5.0931
+[2025-09-05 15:44:23] [Rank 0] Group 11 Loss: 5.1118
+[2025-09-05 15:44:23] [Rank 0] Group 12 Loss: 5.0681
+[2025-09-05 15:44:23] [Rank 0] Group 13 Loss: 5.1493
+[2025-09-05 15:44:23] [Rank 0] Group 14 Loss: 5.1268
+[2025-09-05 15:44:23] [Rank 0] Group 15 Loss: 5.1226
+[2025-09-05 15:44:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:44:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:44:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:44:23] [Rank 0] Group 3 FTA: 0.6700
+[2025-09-05 15:44:23] [Rank 0] Group 4 FTA: 0.4300
+[2025-09-05 15:44:23] [Rank 0] Group 5 FTA: 0.4700
+[2025-09-05 15:44:23] [Rank 0] Group 6 FTA: 0.3500
+[2025-09-05 15:44:23] [Rank 0] Group 7 FTA: 0.2700
+[2025-09-05 15:44:23] [Rank 0] Group 8 FTA: 0.2800
+[2025-09-05 15:44:23] [Rank 0] Group 9 FTA: 0.1800
+[2025-09-05 15:44:23] [Rank 0] Group 10 FTA: 0.1200
+[2025-09-05 15:44:23] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 15:44:23] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 15:44:23] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 15:44:23] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 15:44:23] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 15:44:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 15:44:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 15:44:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 15:44:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 15:44:25] [Rank 0] step:1501/10000 train_time:89073ms step_avg:59.34ms
+[2025-09-05 15:44:25] [Rank 0] step:1521/10000 train_time:89737ms step_avg:59.00ms
+[2025-09-05 15:44:26] [Rank 0] step:1541/10000 train_time:90471ms step_avg:58.71ms
+[2025-09-05 15:44:27] [Rank 0] step:1561/10000 train_time:91205ms step_avg:58.43ms
+[2025-09-05 15:44:28] [Rank 0] step:1581/10000 train_time:91939ms step_avg:58.15ms
+[2025-09-05 15:44:28] [Rank 0] step:1601/10000 train_time:92672ms step_avg:57.88ms
+[2025-09-05 15:44:29] [Rank 0] step:1621/10000 train_time:93410ms step_avg:57.63ms
+[2025-09-05 15:44:31] [Rank 0] step:1641/10000 train_time:94770ms step_avg:57.75ms
+[2025-09-05 15:44:31] [Rank 0] step:1661/10000 train_time:95640ms step_avg:57.58ms
+[2025-09-05 15:44:32] [Rank 0] step:1681/10000 train_time:96374ms step_avg:57.33ms
+[2025-09-05 15:44:33] [Rank 0] step:1701/10000 train_time:97107ms step_avg:57.09ms
+[2025-09-05 15:44:34] [Rank 0] step:1721/10000 train_time:97840ms step_avg:56.85ms
+[2025-09-05 15:44:35] [Rank 0] step:1741/10000 train_time:98794ms step_avg:56.75ms
+[2025-09-05 15:44:35] [Rank 0] step:1761/10000 train_time:99528ms step_avg:56.52ms
+[2025-09-05 15:44:36] [Rank 0] step:1781/10000 train_time:100263ms step_avg:56.30ms
+[2025-09-05 15:44:37] [Rank 0] step:1801/10000 train_time:100997ms step_avg:56.08ms
+[2025-09-05 15:44:37] [Rank 0] step:1821/10000 train_time:101732ms step_avg:55.87ms
+[2025-09-05 15:44:38] [Rank 0] step:1841/10000 train_time:102466ms step_avg:55.66ms
+[2025-09-05 15:44:39] [Rank 0] step:1861/10000 train_time:103200ms step_avg:55.45ms
+[2025-09-05 15:44:40] [Rank 0] step:1881/10000 train_time:103935ms step_avg:55.25ms
+[2025-09-05 15:44:40] [Rank 0] step:1901/10000 train_time:104668ms step_avg:55.06ms
+[2025-09-05 15:44:41] [Rank 0] step:1921/10000 train_time:105402ms step_avg:54.87ms
+[2025-09-05 15:44:42] [Rank 0] step:1941/10000 train_time:106136ms step_avg:54.68ms
+[2025-09-05 15:44:43] [Rank 0] step:1961/10000 train_time:106870ms step_avg:54.50ms
+[2025-09-05 15:44:43] [Rank 0] step:1981/10000 train_time:107603ms step_avg:54.32ms
+[2025-09-05 15:44:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:44:45] [Rank 0] PRINT: step:2000/10000 train_loss:1.6258 val_loss:1.5671 train_time:108418ms step_avg:54.21ms
+[2025-09-05 15:44:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:44:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:46:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:46:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:46:06] [Rank 0] Total Loss: 4.2771
+[2025-09-05 15:46:06] [Rank 0] Total FTA (Unweighted): 0.4438
+[2025-09-05 15:46:06] [Rank 0] Total FTA (Weighted): 0.4437
+[2025-09-05 15:46:06] [Rank 0] Group 0 Loss: 3.3241
+[2025-09-05 15:46:06] [Rank 0] Group 1 Loss: 3.2423
+[2025-09-05 15:46:06] [Rank 0] Group 2 Loss: 3.2369
+[2025-09-05 15:46:06] [Rank 0] Group 3 Loss: 3.5440
+[2025-09-05 15:46:06] [Rank 0] Group 4 Loss: 3.7771
+[2025-09-05 15:46:06] [Rank 0] Group 5 Loss: 3.8766
+[2025-09-05 15:46:06] [Rank 0] Group 6 Loss: 4.0775
+[2025-09-05 15:46:06] [Rank 0] Group 7 Loss: 4.3152
+[2025-09-05 15:46:06] [Rank 0] Group 8 Loss: 4.5808
+[2025-09-05 15:46:06] [Rank 0] Group 9 Loss: 4.7276
+[2025-09-05 15:46:06] [Rank 0] Group 10 Loss: 4.9255
+[2025-09-05 15:46:06] [Rank 0] Group 11 Loss: 4.9133
+[2025-09-05 15:46:06] [Rank 0] Group 12 Loss: 4.9181
+[2025-09-05 15:46:06] [Rank 0] Group 13 Loss: 5.0135
+[2025-09-05 15:46:06] [Rank 0] Group 14 Loss: 4.9845
+[2025-09-05 15:46:06] [Rank 0] Group 15 Loss: 4.9773
+[2025-09-05 15:46:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:46:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:46:06] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:46:06] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:46:06] [Rank 0] Group 4 FTA: 0.4900
+[2025-09-05 15:46:06] [Rank 0] Group 5 FTA: 0.4700
+[2025-09-05 15:46:06] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 15:46:06] [Rank 0] Group 7 FTA: 0.3500
+[2025-09-05 15:46:06] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 15:46:06] [Rank 0] Group 9 FTA: 0.2600
+[2025-09-05 15:46:06] [Rank 0] Group 10 FTA: 0.1800
+[2025-09-05 15:46:06] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 15:46:06] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 15:46:06] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 15:46:06] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 15:46:06] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 15:46:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 15:46:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 15:46:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 15:46:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 15:46:08] [Rank 0] step:2001/10000 train_time:108427ms step_avg:54.19ms
+[2025-09-05 15:46:09] [Rank 0] step:2021/10000 train_time:109301ms step_avg:54.08ms
+[2025-09-05 15:46:09] [Rank 0] step:2041/10000 train_time:110036ms step_avg:53.91ms
+[2025-09-05 15:46:10] [Rank 0] step:2061/10000 train_time:110770ms step_avg:53.75ms
+[2025-09-05 15:46:11] [Rank 0] step:2081/10000 train_time:111504ms step_avg:53.58ms
+[2025-09-05 15:46:12] [Rank 0] step:2101/10000 train_time:112238ms step_avg:53.42ms
+[2025-09-05 15:46:12] [Rank 0] step:2121/10000 train_time:112971ms step_avg:53.26ms
+[2025-09-05 15:46:13] [Rank 0] step:2141/10000 train_time:113704ms step_avg:53.11ms
+[2025-09-05 15:46:14] [Rank 0] step:2161/10000 train_time:114438ms step_avg:52.96ms
+[2025-09-05 15:46:15] [Rank 0] step:2181/10000 train_time:115172ms step_avg:52.81ms
+[2025-09-05 15:46:15] [Rank 0] step:2201/10000 train_time:115905ms step_avg:52.66ms
+[2025-09-05 15:46:16] [Rank 0] step:2221/10000 train_time:116639ms step_avg:52.52ms
+[2025-09-05 15:46:17] [Rank 0] step:2241/10000 train_time:117377ms step_avg:52.38ms
+[2025-09-05 15:46:18] [Rank 0] step:2261/10000 train_time:118117ms step_avg:52.24ms
+[2025-09-05 15:46:18] [Rank 0] step:2281/10000 train_time:118857ms step_avg:52.11ms
+[2025-09-05 15:46:19] [Rank 0] step:2301/10000 train_time:119597ms step_avg:51.98ms
+[2025-09-05 15:46:20] [Rank 0] step:2321/10000 train_time:120336ms step_avg:51.85ms
+[2025-09-05 15:46:21] [Rank 0] step:2341/10000 train_time:121076ms step_avg:51.72ms
+[2025-09-05 15:46:21] [Rank 0] step:2361/10000 train_time:121817ms step_avg:51.60ms
+[2025-09-05 15:46:22] [Rank 0] step:2381/10000 train_time:122557ms step_avg:51.47ms
+[2025-09-05 15:46:23] [Rank 0] step:2401/10000 train_time:123297ms step_avg:51.35ms
+[2025-09-05 15:46:23] [Rank 0] step:2421/10000 train_time:124036ms step_avg:51.23ms
+[2025-09-05 15:46:24] [Rank 0] step:2441/10000 train_time:124776ms step_avg:51.12ms
+[2025-09-05 15:46:25] [Rank 0] step:2461/10000 train_time:125515ms step_avg:51.00ms
+[2025-09-05 15:46:26] [Rank 0] step:2481/10000 train_time:126254ms step_avg:50.89ms
+[2025-09-05 15:46:26] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:46:27] [Rank 0] PRINT: step:2500/10000 train_loss:1.5420 val_loss:1.4990 train_time:127075ms step_avg:50.83ms
+[2025-09-05 15:46:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:46:27] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:47:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:47:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:47:48] [Rank 0] Total Loss: 4.1959
+[2025-09-05 15:47:48] [Rank 0] Total FTA (Unweighted): 0.4619
+[2025-09-05 15:47:48] [Rank 0] Total FTA (Weighted): 0.4619
+[2025-09-05 15:47:48] [Rank 0] Group 0 Loss: 3.4021
+[2025-09-05 15:47:48] [Rank 0] Group 1 Loss: 3.2829
+[2025-09-05 15:47:48] [Rank 0] Group 2 Loss: 3.1503
+[2025-09-05 15:47:48] [Rank 0] Group 3 Loss: 3.5047
+[2025-09-05 15:47:48] [Rank 0] Group 4 Loss: 3.6490
+[2025-09-05 15:47:48] [Rank 0] Group 5 Loss: 3.8603
+[2025-09-05 15:47:48] [Rank 0] Group 6 Loss: 3.9643
+[2025-09-05 15:47:48] [Rank 0] Group 7 Loss: 4.2250
+[2025-09-05 15:47:48] [Rank 0] Group 8 Loss: 4.4718
+[2025-09-05 15:47:48] [Rank 0] Group 9 Loss: 4.6187
+[2025-09-05 15:47:48] [Rank 0] Group 10 Loss: 4.8343
+[2025-09-05 15:47:48] [Rank 0] Group 11 Loss: 4.7911
+[2025-09-05 15:47:48] [Rank 0] Group 12 Loss: 4.7650
+[2025-09-05 15:47:48] [Rank 0] Group 13 Loss: 4.8585
+[2025-09-05 15:47:48] [Rank 0] Group 14 Loss: 4.8754
+[2025-09-05 15:47:48] [Rank 0] Group 15 Loss: 4.8813
+[2025-09-05 15:47:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:47:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:47:49] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:47:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:47:49] [Rank 0] Group 4 FTA: 0.5600
+[2025-09-05 15:47:49] [Rank 0] Group 5 FTA: 0.5400
+[2025-09-05 15:47:49] [Rank 0] Group 6 FTA: 0.4300
+[2025-09-05 15:47:49] [Rank 0] Group 7 FTA: 0.3900
+[2025-09-05 15:47:49] [Rank 0] Group 8 FTA: 0.3900
+[2025-09-05 15:47:49] [Rank 0] Group 9 FTA: 0.2700
+[2025-09-05 15:47:49] [Rank 0] Group 10 FTA: 0.2400
+[2025-09-05 15:47:49] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 15:47:49] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 15:47:49] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 15:47:49] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 15:47:49] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 15:47:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 15:47:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 15:47:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 15:47:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 15:47:50] [Rank 0] step:2501/10000 train_time:127083ms step_avg:50.81ms
+[2025-09-05 15:47:51] [Rank 0] step:2521/10000 train_time:127751ms step_avg:50.67ms
+[2025-09-05 15:47:51] [Rank 0] step:2541/10000 train_time:128492ms step_avg:50.57ms
+[2025-09-05 15:47:52] [Rank 0] step:2561/10000 train_time:129231ms step_avg:50.46ms
+[2025-09-05 15:47:53] [Rank 0] step:2581/10000 train_time:129971ms step_avg:50.36ms
+[2025-09-05 15:47:54] [Rank 0] step:2601/10000 train_time:130711ms step_avg:50.25ms
+[2025-09-05 15:47:54] [Rank 0] step:2621/10000 train_time:131451ms step_avg:50.15ms
+[2025-09-05 15:47:55] [Rank 0] step:2641/10000 train_time:132190ms step_avg:50.05ms
+[2025-09-05 15:47:56] [Rank 0] step:2661/10000 train_time:132930ms step_avg:49.96ms
+[2025-09-05 15:47:57] [Rank 0] step:2681/10000 train_time:133671ms step_avg:49.86ms
+[2025-09-05 15:47:57] [Rank 0] step:2701/10000 train_time:134410ms step_avg:49.76ms
+[2025-09-05 15:47:58] [Rank 0] step:2721/10000 train_time:135149ms step_avg:49.67ms
+[2025-09-05 15:47:59] [Rank 0] step:2741/10000 train_time:135889ms step_avg:49.58ms
+[2025-09-05 15:48:00] [Rank 0] step:2761/10000 train_time:136628ms step_avg:49.49ms
+[2025-09-05 15:48:00] [Rank 0] step:2781/10000 train_time:137368ms step_avg:49.40ms
+[2025-09-05 15:48:01] [Rank 0] step:2801/10000 train_time:138108ms step_avg:49.31ms
+[2025-09-05 15:48:02] [Rank 0] step:2821/10000 train_time:139476ms step_avg:49.44ms
+[2025-09-05 15:48:03] [Rank 0] step:2841/10000 train_time:140215ms step_avg:49.35ms
+[2025-09-05 15:48:04] [Rank 0] step:2861/10000 train_time:140955ms step_avg:49.27ms
+[2025-09-05 15:48:05] [Rank 0] step:2881/10000 train_time:141696ms step_avg:49.18ms
+[2025-09-05 15:48:05] [Rank 0] step:2901/10000 train_time:142436ms step_avg:49.10ms
+[2025-09-05 15:48:06] [Rank 0] step:2921/10000 train_time:143176ms step_avg:49.02ms
+[2025-09-05 15:48:07] [Rank 0] step:2941/10000 train_time:143915ms step_avg:48.93ms
+[2025-09-05 15:48:08] [Rank 0] step:2961/10000 train_time:144656ms step_avg:48.85ms
+[2025-09-05 15:48:08] [Rank 0] step:2981/10000 train_time:145396ms step_avg:48.77ms
+[2025-09-05 15:48:09] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:48:10] [Rank 0] PRINT: step:3000/10000 train_loss:1.4887 val_loss:1.4649 train_time:146216ms step_avg:48.74ms
+[2025-09-05 15:48:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:48:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:49:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:49:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:49:30] [Rank 0] Total Loss: 4.1380
+[2025-09-05 15:49:31] [Rank 0] Total FTA (Unweighted): 0.4831
+[2025-09-05 15:49:31] [Rank 0] Total FTA (Weighted): 0.4831
+[2025-09-05 15:49:31] [Rank 0] Group 0 Loss: 3.4679
+[2025-09-05 15:49:31] [Rank 0] Group 1 Loss: 3.1740
+[2025-09-05 15:49:31] [Rank 0] Group 2 Loss: 3.1206
+[2025-09-05 15:49:31] [Rank 0] Group 3 Loss: 3.4816
+[2025-09-05 15:49:31] [Rank 0] Group 4 Loss: 3.6061
+[2025-09-05 15:49:31] [Rank 0] Group 5 Loss: 3.8129
+[2025-09-05 15:49:31] [Rank 0] Group 6 Loss: 3.8672
+[2025-09-05 15:49:31] [Rank 0] Group 7 Loss: 4.1566
+[2025-09-05 15:49:31] [Rank 0] Group 8 Loss: 4.4124
+[2025-09-05 15:49:31] [Rank 0] Group 9 Loss: 4.5474
+[2025-09-05 15:49:31] [Rank 0] Group 10 Loss: 4.7058
+[2025-09-05 15:49:31] [Rank 0] Group 11 Loss: 4.6894
+[2025-09-05 15:49:31] [Rank 0] Group 12 Loss: 4.7302
+[2025-09-05 15:49:31] [Rank 0] Group 13 Loss: 4.7958
+[2025-09-05 15:49:31] [Rank 0] Group 14 Loss: 4.8179
+[2025-09-05 15:49:31] [Rank 0] Group 15 Loss: 4.8228
+[2025-09-05 15:49:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:49:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:49:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:49:31] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:49:31] [Rank 0] Group 4 FTA: 0.6100
+[2025-09-05 15:49:31] [Rank 0] Group 5 FTA: 0.5600
+[2025-09-05 15:49:31] [Rank 0] Group 6 FTA: 0.4400
+[2025-09-05 15:49:31] [Rank 0] Group 7 FTA: 0.4000
+[2025-09-05 15:49:31] [Rank 0] Group 8 FTA: 0.4100
+[2025-09-05 15:49:31] [Rank 0] Group 9 FTA: 0.2900
+[2025-09-05 15:49:31] [Rank 0] Group 10 FTA: 0.3500
+[2025-09-05 15:49:31] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 15:49:31] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 15:49:31] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 15:49:31] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 15:49:31] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 15:49:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 15:49:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 15:49:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 15:49:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 15:49:32] [Rank 0] step:3001/10000 train_time:146225ms step_avg:48.73ms
+[2025-09-05 15:49:33] [Rank 0] step:3021/10000 train_time:146895ms step_avg:48.62ms
+[2025-09-05 15:49:34] [Rank 0] step:3041/10000 train_time:147635ms step_avg:48.55ms
+[2025-09-05 15:49:34] [Rank 0] step:3061/10000 train_time:148375ms step_avg:48.47ms
+[2025-09-05 15:49:35] [Rank 0] step:3081/10000 train_time:149115ms step_avg:48.40ms
+[2025-09-05 15:49:36] [Rank 0] step:3101/10000 train_time:149856ms step_avg:48.32ms
+[2025-09-05 15:49:36] [Rank 0] step:3121/10000 train_time:150595ms step_avg:48.25ms
+[2025-09-05 15:49:37] [Rank 0] step:3141/10000 train_time:151334ms step_avg:48.18ms
+[2025-09-05 15:49:38] [Rank 0] step:3161/10000 train_time:152073ms step_avg:48.11ms
+[2025-09-05 15:49:39] [Rank 0] step:3181/10000 train_time:152813ms step_avg:48.04ms
+[2025-09-05 15:49:39] [Rank 0] step:3201/10000 train_time:153553ms step_avg:47.97ms
+[2025-09-05 15:49:40] [Rank 0] step:3221/10000 train_time:154292ms step_avg:47.90ms
+[2025-09-05 15:49:41] [Rank 0] step:3241/10000 train_time:155031ms step_avg:47.83ms
+[2025-09-05 15:49:42] [Rank 0] step:3261/10000 train_time:155772ms step_avg:47.77ms
+[2025-09-05 15:49:42] [Rank 0] step:3281/10000 train_time:156512ms step_avg:47.70ms
+[2025-09-05 15:49:43] [Rank 0] step:3301/10000 train_time:157252ms step_avg:47.64ms
+[2025-09-05 15:49:44] [Rank 0] step:3321/10000 train_time:157992ms step_avg:47.57ms
+[2025-09-05 15:49:45] [Rank 0] step:3341/10000 train_time:158732ms step_avg:47.51ms
+[2025-09-05 15:49:45] [Rank 0] step:3361/10000 train_time:159472ms step_avg:47.45ms
+[2025-09-05 15:49:46] [Rank 0] step:3381/10000 train_time:160212ms step_avg:47.39ms
+[2025-09-05 15:49:47] [Rank 0] step:3401/10000 train_time:160951ms step_avg:47.32ms
+[2025-09-05 15:49:48] [Rank 0] step:3421/10000 train_time:161817ms step_avg:47.30ms
+[2025-09-05 15:49:48] [Rank 0] step:3441/10000 train_time:162557ms step_avg:47.24ms
+[2025-09-05 15:49:49] [Rank 0] step:3461/10000 train_time:163297ms step_avg:47.18ms
+[2025-09-05 15:49:50] [Rank 0] step:3481/10000 train_time:164147ms step_avg:47.16ms
+[2025-09-05 15:49:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 15:49:51] [Rank 0] PRINT: step:3500/10000 train_loss:1.4590 val_loss:1.4378 train_time:164968ms step_avg:47.13ms
+[2025-09-05 15:49:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 15:49:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 15:51:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 15:51:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 15:51:13] [Rank 0] Total Loss: 4.2103
+[2025-09-05 15:51:13] [Rank 0] Total FTA (Unweighted): 0.5031
+[2025-09-05 15:51:13] [Rank 0] Total FTA (Weighted): 0.5031
+[2025-09-05 15:51:13] [Rank 0] Group 0 Loss: 3.3699
+[2025-09-05 15:51:13] [Rank 0] Group 1 Loss: 3.2294
+[2025-09-05 15:51:13] [Rank 0] Group 2 Loss: 3.2607
+[2025-09-05 15:51:13] [Rank 0] Group 3 Loss: 3.6014
+[2025-09-05 15:51:13] [Rank 0] Group 4 Loss: 3.7839
+[2025-09-05 15:51:13] [Rank 0] Group 5 Loss: 3.8460
+[2025-09-05 15:51:13] [Rank 0] Group 6 Loss: 3.9930
+[2025-09-05 15:51:13] [Rank 0] Group 7 Loss: 4.2548
+[2025-09-05 15:51:13] [Rank 0] Group 8 Loss: 4.4579
+[2025-09-05 15:51:13] [Rank 0] Group 9 Loss: 4.5997
+[2025-09-05 15:51:13] [Rank 0] Group 10 Loss: 4.7775
+[2025-09-05 15:51:13] [Rank 0] Group 11 Loss: 4.7813
+[2025-09-05 15:51:13] [Rank 0] Group 12 Loss: 4.7999
+[2025-09-05 15:51:13] [Rank 0] Group 13 Loss: 4.8366
+[2025-09-05 15:51:13] [Rank 0] Group 14 Loss: 4.8758
+[2025-09-05 15:51:13] [Rank 0] Group 15 Loss: 4.8971
+[2025-09-05 15:51:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 15:51:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 15:51:13] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 15:51:13] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 15:51:13] [Rank 0] Group 4 FTA: 0.7400
+[2025-09-05 15:51:13] [Rank 0] Group 5 FTA: 0.5600
+[2025-09-05 15:51:13] [Rank 0] Group 6 FTA: 0.4500
+[2025-09-05 15:51:13] [Rank 0] Group 7 FTA: 0.4100
+[2025-09-05 15:51:13] [Rank 0] Group 8 FTA: 0.4000
+[2025-09-05 15:51:13] [Rank 0] Group 9 FTA: 0.3200
+[2025-09-05 15:51:13] [Rank 0] Group 10 FTA: 0.3800
+[2025-09-05 15:51:13] [Rank 0] Group 11 FTA: 0.2500
+[2025-09-05 15:51:13] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 15:51:13] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 15:51:13] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 15:51:13] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 15:51:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 15:51:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 15:51:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 15:51:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 15:51:14] [Rank 0] step:3501/10000 train_time:164977ms step_avg:47.12ms
+[2025-09-05 15:51:15] [Rank 0] step:3521/10000 train_time:165657ms step_avg:47.05ms
+[2025-09-05 15:51:16] [Rank 0] step:3541/10000 train_time:166397ms step_avg:46.99ms
+[2025-09-05 15:51:16] [Rank 0] step:3561/10000 train_time:167136ms step_avg:46.94ms
+[2025-09-05 15:51:17] [Rank 0] step:3581/10000 train_time:167875ms step_avg:46.88ms
+[2025-09-05 15:51:18] [Rank 0] step:3601/10000 train_time:168615ms step_avg:46.82ms
step_avg:46.77ms +[2025-09-05 15:51:19] [Rank 0] step:3621/10000 train_time:169353ms step_avg:46.77ms +[2025-09-05 15:51:20] [Rank 0] step:3641/10000 train_time:170691ms step_avg:46.88ms +[2025-09-05 15:51:20] [Rank 0] step:3641/10000 train_time:170691ms step_avg:46.88ms +[2025-09-05 15:51:21] [Rank 0] step:3661/10000 train_time:171439ms step_avg:46.83ms +[2025-09-05 15:51:21] [Rank 0] step:3661/10000 train_time:171439ms step_avg:46.83ms +[2025-09-05 15:51:21] [Rank 0] step:3681/10000 train_time:172178ms step_avg:46.77ms +[2025-09-05 15:51:21] [Rank 0] step:3681/10000 train_time:172178ms step_avg:46.77ms +[2025-09-05 15:51:22] [Rank 0] step:3701/10000 train_time:172918ms step_avg:46.72ms +[2025-09-05 15:51:22] [Rank 0] step:3701/10000 train_time:172918ms step_avg:46.72ms +[2025-09-05 15:51:23] [Rank 0] step:3721/10000 train_time:173658ms step_avg:46.67ms +[2025-09-05 15:51:23] [Rank 0] step:3721/10000 train_time:173658ms step_avg:46.67ms +[2025-09-05 15:51:24] [Rank 0] step:3741/10000 train_time:174398ms step_avg:46.62ms +[2025-09-05 15:51:24] [Rank 0] step:3741/10000 train_time:174398ms step_avg:46.62ms +[2025-09-05 15:51:24] [Rank 0] step:3761/10000 train_time:175137ms step_avg:46.57ms +[2025-09-05 15:51:24] [Rank 0] step:3761/10000 train_time:175137ms step_avg:46.57ms +[2025-09-05 15:51:25] [Rank 0] step:3781/10000 train_time:175877ms step_avg:46.52ms +[2025-09-05 15:51:25] [Rank 0] step:3781/10000 train_time:175877ms step_avg:46.52ms +[2025-09-05 15:51:26] [Rank 0] step:3801/10000 train_time:176616ms step_avg:46.47ms +[2025-09-05 15:51:26] [Rank 0] step:3801/10000 train_time:176616ms step_avg:46.47ms +[2025-09-05 15:51:27] [Rank 0] step:3821/10000 train_time:177356ms step_avg:46.42ms +[2025-09-05 15:51:27] [Rank 0] step:3821/10000 train_time:177356ms step_avg:46.42ms +[2025-09-05 15:51:27] [Rank 0] step:3841/10000 train_time:178095ms step_avg:46.37ms +[2025-09-05 15:51:27] [Rank 0] step:3841/10000 train_time:178095ms step_avg:46.37ms +[2025-09-05 15:51:28] [Rank 0] step:3861/10000 train_time:178834ms step_avg:46.32ms +[2025-09-05 15:51:28] [Rank 0] step:3861/10000 train_time:178834ms step_avg:46.32ms +[2025-09-05 15:51:29] [Rank 0] step:3881/10000 train_time:179574ms step_avg:46.27ms +[2025-09-05 15:51:29] [Rank 0] step:3881/10000 train_time:179574ms step_avg:46.27ms +[2025-09-05 15:51:29] [Rank 0] step:3901/10000 train_time:180313ms step_avg:46.22ms +[2025-09-05 15:51:29] [Rank 0] step:3901/10000 train_time:180313ms step_avg:46.22ms +[2025-09-05 15:51:30] [Rank 0] step:3921/10000 train_time:181053ms step_avg:46.18ms +[2025-09-05 15:51:30] [Rank 0] step:3921/10000 train_time:181053ms step_avg:46.18ms +[2025-09-05 15:51:31] [Rank 0] step:3941/10000 train_time:181793ms step_avg:46.13ms +[2025-09-05 15:51:31] [Rank 0] step:3941/10000 train_time:181793ms step_avg:46.13ms +[2025-09-05 15:51:32] [Rank 0] step:3961/10000 train_time:182532ms step_avg:46.08ms +[2025-09-05 15:51:32] [Rank 0] step:3961/10000 train_time:182532ms step_avg:46.08ms +[2025-09-05 15:51:32] [Rank 0] step:3981/10000 train_time:183272ms step_avg:46.04ms +[2025-09-05 15:51:32] [Rank 0] step:3981/10000 train_time:183272ms step_avg:46.04ms +[2025-09-05 15:51:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:51:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:51:34] [Rank 0] PRINT: step:4000/10000 train_loss:1.4408 val_loss:1.4264 train_time:184097ms step_avg:46.02ms +[2025-09-05 15:51:34] [Rank 0] PRINT: step:4000/10000 train_loss:1.4408 val_loss:1.4264 train_time:184097ms step_avg:46.02ms +[2025-09-05 15:51:34] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:51:34] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:51:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:51:34] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:52:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:52:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:52:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:52:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:52:55] [Rank 0] Total Loss: 4.2107 +[2025-09-05 15:52:55] [Rank 0] Total Loss: 4.2107 +[2025-09-05 15:52:55] [Rank 0] Total FTA (Unweighted): 0.5237 +[2025-09-05 15:52:55] [Rank 0] Total FTA (Unweighted): 0.5237 +[2025-09-05 15:52:55] [Rank 0] Total FTA (Weighted): 0.5238 +[2025-09-05 15:52:55] [Rank 0] Total FTA (Weighted): 0.5238 +[2025-09-05 15:52:55] [Rank 0] Group 0 Loss: 3.4217 +[2025-09-05 15:52:55] [Rank 0] Group 0 Loss: 3.4217 +[2025-09-05 15:52:55] [Rank 0] Group 1 Loss: 3.2468 +[2025-09-05 15:52:55] [Rank 0] Group 1 Loss: 3.2468 +[2025-09-05 15:52:55] [Rank 0] Group 2 Loss: 3.2608 +[2025-09-05 15:52:55] [Rank 0] Group 2 Loss: 3.2608 +[2025-09-05 15:52:55] [Rank 0] Group 3 Loss: 3.6468 +[2025-09-05 15:52:55] [Rank 0] Group 3 Loss: 3.6468 +[2025-09-05 15:52:55] [Rank 0] Group 4 Loss: 3.7954 +[2025-09-05 15:52:55] [Rank 0] Group 4 Loss: 3.7954 +[2025-09-05 15:52:55] [Rank 0] Group 5 Loss: 3.9033 +[2025-09-05 15:52:55] [Rank 0] Group 5 Loss: 3.9033 +[2025-09-05 15:52:55] [Rank 0] Group 6 Loss: 3.9592 +[2025-09-05 15:52:55] [Rank 0] Group 6 Loss: 3.9592 +[2025-09-05 15:52:55] [Rank 0] Group 7 Loss: 4.2150 +[2025-09-05 15:52:55] [Rank 0] Group 7 Loss: 4.2150 +[2025-09-05 15:52:55] [Rank 0] Group 8 Loss: 4.4841 +[2025-09-05 15:52:55] [Rank 0] Group 8 Loss: 4.4841 +[2025-09-05 15:52:55] [Rank 0] Group 9 Loss: 4.6005 +[2025-09-05 15:52:55] [Rank 0] Group 9 Loss: 4.6005 +[2025-09-05 15:52:55] [Rank 0] Group 10 Loss: 4.7577 +[2025-09-05 15:52:55] [Rank 0] Group 10 Loss: 4.7577 +[2025-09-05 15:52:55] [Rank 0] Group 11 Loss: 4.7610 +[2025-09-05 15:52:55] [Rank 0] Group 11 Loss: 4.7610 +[2025-09-05 15:52:55] [Rank 0] Group 12 Loss: 4.7581 +[2025-09-05 15:52:55] [Rank 0] Group 12 Loss: 4.7581 +[2025-09-05 15:52:55] [Rank 0] Group 13 Loss: 4.8415 +[2025-09-05 15:52:55] [Rank 0] Group 13 Loss: 4.8415 +[2025-09-05 15:52:55] [Rank 0] Group 14 Loss: 4.8480 +[2025-09-05 15:52:55] [Rank 0] Group 14 Loss: 4.8480 +[2025-09-05 15:52:55] [Rank 0] Group 15 Loss: 4.8711 +[2025-09-05 15:52:55] [Rank 0] Group 15 Loss: 4.8711 +[2025-09-05 15:52:55] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:52:55] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:52:55] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:52:55] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:52:55] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:52:55] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:52:55] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:52:55] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:52:55] [Rank 0] Group 4 FTA: 0.8300 +[2025-09-05 15:52:55] [Rank 0] Group 4 FTA: 0.8300 +[2025-09-05 15:52:55] [Rank 0] Group 5 FTA: 0.5800 +[2025-09-05 15:52:55] [Rank 0] Group 5 FTA: 
0.5800 +[2025-09-05 15:52:55] [Rank 0] Group 6 FTA: 0.4800 +[2025-09-05 15:52:55] [Rank 0] Group 6 FTA: 0.4800 +[2025-09-05 15:52:55] [Rank 0] Group 7 FTA: 0.4500 +[2025-09-05 15:52:55] [Rank 0] Group 7 FTA: 0.4500 +[2025-09-05 15:52:55] [Rank 0] Group 8 FTA: 0.4600 +[2025-09-05 15:52:55] [Rank 0] Group 8 FTA: 0.4600 +[2025-09-05 15:52:55] [Rank 0] Group 9 FTA: 0.3700 +[2025-09-05 15:52:55] [Rank 0] Group 9 FTA: 0.3700 +[2025-09-05 15:52:55] [Rank 0] Group 10 FTA: 0.4000 +[2025-09-05 15:52:55] [Rank 0] Group 10 FTA: 0.4000 +[2025-09-05 15:52:55] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 15:52:55] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 15:52:55] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 15:52:55] [Rank 0] Group 12 FTA: 0.1300 +[2025-09-05 15:52:55] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 15:52:55] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 15:52:55] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 15:52:55] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 15:52:55] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:52:55] [Rank 0] Group 15 FTA: 0.0900 +[2025-09-05 15:52:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:52:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:52:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:52:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:52:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:52:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:52:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:52:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:52:57] [Rank 0] step:4001/10000 train_time:184106ms step_avg:46.01ms +[2025-09-05 15:52:57] [Rank 0] step:4001/10000 train_time:184106ms step_avg:46.01ms +[2025-09-05 15:52:58] [Rank 0] step:4021/10000 train_time:184976ms step_avg:46.00ms +[2025-09-05 15:52:58] [Rank 0] step:4021/10000 train_time:184976ms step_avg:46.00ms +[2025-09-05 15:52:59] [Rank 0] step:4041/10000 train_time:185716ms step_avg:45.96ms +[2025-09-05 15:52:59] [Rank 0] step:4041/10000 train_time:185716ms step_avg:45.96ms +[2025-09-05 15:52:59] [Rank 0] step:4061/10000 train_time:186455ms step_avg:45.91ms +[2025-09-05 15:52:59] [Rank 0] step:4061/10000 train_time:186455ms step_avg:45.91ms +[2025-09-05 15:53:00] [Rank 0] step:4081/10000 train_time:187342ms step_avg:45.91ms +[2025-09-05 15:53:00] [Rank 0] step:4081/10000 train_time:187342ms step_avg:45.91ms +[2025-09-05 15:53:01] [Rank 0] step:4101/10000 train_time:188081ms step_avg:45.86ms +[2025-09-05 15:53:01] [Rank 0] step:4101/10000 train_time:188081ms step_avg:45.86ms +[2025-09-05 15:53:02] [Rank 0] step:4121/10000 train_time:188821ms step_avg:45.82ms +[2025-09-05 15:53:02] 
[Rank 0] step:4121/10000 train_time:188821ms step_avg:45.82ms +[2025-09-05 15:53:02] [Rank 0] step:4141/10000 train_time:189561ms step_avg:45.78ms +[2025-09-05 15:53:02] [Rank 0] step:4141/10000 train_time:189561ms step_avg:45.78ms +[2025-09-05 15:53:03] [Rank 0] step:4161/10000 train_time:190301ms step_avg:45.73ms +[2025-09-05 15:53:03] [Rank 0] step:4161/10000 train_time:190301ms step_avg:45.73ms +[2025-09-05 15:53:04] [Rank 0] step:4181/10000 train_time:191041ms step_avg:45.69ms +[2025-09-05 15:53:04] [Rank 0] step:4181/10000 train_time:191041ms step_avg:45.69ms +[2025-09-05 15:53:05] [Rank 0] step:4201/10000 train_time:191780ms step_avg:45.65ms +[2025-09-05 15:53:05] [Rank 0] step:4201/10000 train_time:191780ms step_avg:45.65ms +[2025-09-05 15:53:05] [Rank 0] step:4221/10000 train_time:192519ms step_avg:45.61ms +[2025-09-05 15:53:05] [Rank 0] step:4221/10000 train_time:192519ms step_avg:45.61ms +[2025-09-05 15:53:06] [Rank 0] step:4241/10000 train_time:193259ms step_avg:45.57ms +[2025-09-05 15:53:06] [Rank 0] step:4241/10000 train_time:193259ms step_avg:45.57ms +[2025-09-05 15:53:07] [Rank 0] step:4261/10000 train_time:193999ms step_avg:45.53ms +[2025-09-05 15:53:07] [Rank 0] step:4261/10000 train_time:193999ms step_avg:45.53ms +[2025-09-05 15:53:08] [Rank 0] step:4281/10000 train_time:194739ms step_avg:45.49ms +[2025-09-05 15:53:08] [Rank 0] step:4281/10000 train_time:194739ms step_avg:45.49ms +[2025-09-05 15:53:08] [Rank 0] step:4301/10000 train_time:195478ms step_avg:45.45ms +[2025-09-05 15:53:08] [Rank 0] step:4301/10000 train_time:195478ms step_avg:45.45ms +[2025-09-05 15:53:09] [Rank 0] step:4321/10000 train_time:196217ms step_avg:45.41ms +[2025-09-05 15:53:09] [Rank 0] step:4321/10000 train_time:196217ms step_avg:45.41ms +[2025-09-05 15:53:10] [Rank 0] step:4341/10000 train_time:196955ms step_avg:45.37ms +[2025-09-05 15:53:10] [Rank 0] step:4341/10000 train_time:196955ms step_avg:45.37ms +[2025-09-05 15:53:11] [Rank 0] step:4361/10000 train_time:197695ms step_avg:45.33ms +[2025-09-05 15:53:11] [Rank 0] step:4361/10000 train_time:197695ms step_avg:45.33ms +[2025-09-05 15:53:11] [Rank 0] step:4381/10000 train_time:198434ms step_avg:45.29ms +[2025-09-05 15:53:11] [Rank 0] step:4381/10000 train_time:198434ms step_avg:45.29ms +[2025-09-05 15:53:12] [Rank 0] step:4401/10000 train_time:199174ms step_avg:45.26ms +[2025-09-05 15:53:12] [Rank 0] step:4401/10000 train_time:199174ms step_avg:45.26ms +[2025-09-05 15:53:13] [Rank 0] step:4421/10000 train_time:199913ms step_avg:45.22ms +[2025-09-05 15:53:13] [Rank 0] step:4421/10000 train_time:199913ms step_avg:45.22ms +[2025-09-05 15:53:13] [Rank 0] step:4441/10000 train_time:200652ms step_avg:45.18ms +[2025-09-05 15:53:13] [Rank 0] step:4441/10000 train_time:200652ms step_avg:45.18ms +[2025-09-05 15:53:14] [Rank 0] step:4461/10000 train_time:201392ms step_avg:45.15ms +[2025-09-05 15:53:14] [Rank 0] step:4461/10000 train_time:201392ms step_avg:45.15ms +[2025-09-05 15:53:15] [Rank 0] step:4481/10000 train_time:202131ms step_avg:45.11ms +[2025-09-05 15:53:15] [Rank 0] step:4481/10000 train_time:202131ms step_avg:45.11ms +[2025-09-05 15:53:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:53:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:53:16] [Rank 0] PRINT: step:4500/10000 train_loss:1.4309 val_loss:1.4170 train_time:202951ms step_avg:45.10ms +[2025-09-05 15:53:16] [Rank 0] PRINT: step:4500/10000 train_loss:1.4309 val_loss:1.4170 train_time:202951ms step_avg:45.10ms +[2025-09-05 15:53:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:53:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:53:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:53:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:54:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:54:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:54:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:54:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:54:38] [Rank 0] Total Loss: 4.1270 +[2025-09-05 15:54:38] [Rank 0] Total Loss: 4.1270 +[2025-09-05 15:54:38] [Rank 0] Total FTA (Unweighted): 0.5281 +[2025-09-05 15:54:38] [Rank 0] Total FTA (Unweighted): 0.5281 +[2025-09-05 15:54:38] [Rank 0] Total FTA (Weighted): 0.5281 +[2025-09-05 15:54:38] [Rank 0] Total FTA (Weighted): 0.5281 +[2025-09-05 15:54:38] [Rank 0] Group 0 Loss: 3.4771 +[2025-09-05 15:54:38] [Rank 0] Group 0 Loss: 3.4771 +[2025-09-05 15:54:38] [Rank 0] Group 1 Loss: 3.1212 +[2025-09-05 15:54:38] [Rank 0] Group 1 Loss: 3.1212 +[2025-09-05 15:54:38] [Rank 0] Group 2 Loss: 3.1368 +[2025-09-05 15:54:38] [Rank 0] Group 2 Loss: 3.1368 +[2025-09-05 15:54:38] [Rank 0] Group 3 Loss: 3.5251 +[2025-09-05 15:54:38] [Rank 0] Group 3 Loss: 3.5251 +[2025-09-05 15:54:38] [Rank 0] Group 4 Loss: 3.6571 +[2025-09-05 15:54:38] [Rank 0] Group 4 Loss: 3.6571 +[2025-09-05 15:54:38] [Rank 0] Group 5 Loss: 3.8224 +[2025-09-05 15:54:38] [Rank 0] Group 5 Loss: 3.8224 +[2025-09-05 15:54:38] [Rank 0] Group 6 Loss: 3.8744 +[2025-09-05 15:54:38] [Rank 0] Group 6 Loss: 3.8744 +[2025-09-05 15:54:38] [Rank 0] Group 7 Loss: 4.1127 +[2025-09-05 15:54:38] [Rank 0] Group 7 Loss: 4.1127 +[2025-09-05 15:54:38] [Rank 0] Group 8 Loss: 4.3982 +[2025-09-05 15:54:38] [Rank 0] Group 8 Loss: 4.3982 +[2025-09-05 15:54:38] [Rank 0] Group 9 Loss: 4.5470 +[2025-09-05 15:54:38] [Rank 0] Group 9 Loss: 4.5470 +[2025-09-05 15:54:38] [Rank 0] Group 10 Loss: 4.7135 +[2025-09-05 15:54:38] [Rank 0] Group 10 Loss: 4.7135 +[2025-09-05 15:54:38] [Rank 0] Group 11 Loss: 4.6559 +[2025-09-05 15:54:38] [Rank 0] Group 11 Loss: 4.6559 +[2025-09-05 15:54:38] [Rank 0] Group 12 Loss: 4.6575 +[2025-09-05 15:54:38] [Rank 0] Group 12 Loss: 4.6575 +[2025-09-05 15:54:38] [Rank 0] Group 13 Loss: 4.7596 +[2025-09-05 15:54:38] [Rank 0] Group 13 Loss: 4.7596 +[2025-09-05 15:54:38] [Rank 0] Group 14 Loss: 4.7650 +[2025-09-05 15:54:38] [Rank 0] Group 14 Loss: 4.7650 +[2025-09-05 15:54:38] [Rank 0] Group 15 Loss: 4.8080 +[2025-09-05 15:54:38] [Rank 0] Group 15 Loss: 4.8080 +[2025-09-05 15:54:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:54:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:54:38] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:54:38] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:54:38] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:54:38] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:54:38] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:54:38] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:54:38] [Rank 0] Group 4 FTA: 0.8300 +[2025-09-05 15:54:38] [Rank 0] Group 4 FTA: 0.8300 +[2025-09-05 15:54:38] [Rank 0] Group 5 FTA: 0.5800 +[2025-09-05 15:54:38] [Rank 0] Group 5 FTA: 
0.5800 +[2025-09-05 15:54:38] [Rank 0] Group 6 FTA: 0.5100 +[2025-09-05 15:54:38] [Rank 0] Group 6 FTA: 0.5100 +[2025-09-05 15:54:38] [Rank 0] Group 7 FTA: 0.4600 +[2025-09-05 15:54:38] [Rank 0] Group 7 FTA: 0.4600 +[2025-09-05 15:54:38] [Rank 0] Group 8 FTA: 0.4700 +[2025-09-05 15:54:38] [Rank 0] Group 8 FTA: 0.4700 +[2025-09-05 15:54:38] [Rank 0] Group 9 FTA: 0.4100 +[2025-09-05 15:54:38] [Rank 0] Group 9 FTA: 0.4100 +[2025-09-05 15:54:38] [Rank 0] Group 10 FTA: 0.4600 +[2025-09-05 15:54:38] [Rank 0] Group 10 FTA: 0.4600 +[2025-09-05 15:54:38] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 15:54:38] [Rank 0] Group 11 FTA: 0.3000 +[2025-09-05 15:54:38] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-05 15:54:38] [Rank 0] Group 12 FTA: 0.1800 +[2025-09-05 15:54:38] [Rank 0] Group 13 FTA: 0.1000 +[2025-09-05 15:54:38] [Rank 0] Group 13 FTA: 0.1000 +[2025-09-05 15:54:38] [Rank 0] Group 14 FTA: 0.1000 +[2025-09-05 15:54:38] [Rank 0] Group 14 FTA: 0.1000 +[2025-09-05 15:54:38] [Rank 0] Group 15 FTA: 0.0500 +[2025-09-05 15:54:38] [Rank 0] Group 15 FTA: 0.0500 +[2025-09-05 15:54:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:54:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:54:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:54:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:54:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:54:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:54:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:54:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:54:39] [Rank 0] step:4501/10000 train_time:202960ms step_avg:45.09ms +[2025-09-05 15:54:39] [Rank 0] step:4501/10000 train_time:202960ms step_avg:45.09ms +[2025-09-05 15:54:40] [Rank 0] step:4521/10000 train_time:203635ms step_avg:45.04ms +[2025-09-05 15:54:40] [Rank 0] step:4521/10000 train_time:203635ms step_avg:45.04ms +[2025-09-05 15:54:41] [Rank 0] step:4541/10000 train_time:204375ms step_avg:45.01ms +[2025-09-05 15:54:41] [Rank 0] step:4541/10000 train_time:204375ms step_avg:45.01ms +[2025-09-05 15:54:42] [Rank 0] step:4561/10000 train_time:205114ms step_avg:44.97ms +[2025-09-05 15:54:42] [Rank 0] step:4561/10000 train_time:205114ms step_avg:44.97ms +[2025-09-05 15:54:42] [Rank 0] step:4581/10000 train_time:205854ms step_avg:44.94ms +[2025-09-05 15:54:42] [Rank 0] step:4581/10000 train_time:205854ms step_avg:44.94ms +[2025-09-05 15:54:43] [Rank 0] step:4601/10000 train_time:206593ms step_avg:44.90ms +[2025-09-05 15:54:43] [Rank 0] step:4601/10000 train_time:206593ms step_avg:44.90ms +[2025-09-05 15:54:44] [Rank 0] step:4621/10000 train_time:207332ms step_avg:44.87ms +[2025-09-05 15:54:44] 
[Rank 0] step:4621/10000 train_time:207332ms step_avg:44.87ms +[2025-09-05 15:54:45] [Rank 0] step:4641/10000 train_time:208071ms step_avg:44.83ms +[2025-09-05 15:54:45] [Rank 0] step:4641/10000 train_time:208071ms step_avg:44.83ms +[2025-09-05 15:54:45] [Rank 0] step:4661/10000 train_time:208811ms step_avg:44.80ms +[2025-09-05 15:54:45] [Rank 0] step:4661/10000 train_time:208811ms step_avg:44.80ms +[2025-09-05 15:54:46] [Rank 0] step:4681/10000 train_time:209550ms step_avg:44.77ms +[2025-09-05 15:54:46] [Rank 0] step:4681/10000 train_time:209550ms step_avg:44.77ms +[2025-09-05 15:54:47] [Rank 0] step:4701/10000 train_time:210289ms step_avg:44.73ms +[2025-09-05 15:54:47] [Rank 0] step:4701/10000 train_time:210289ms step_avg:44.73ms +[2025-09-05 15:54:48] [Rank 0] step:4721/10000 train_time:211028ms step_avg:44.70ms +[2025-09-05 15:54:48] [Rank 0] step:4721/10000 train_time:211028ms step_avg:44.70ms +[2025-09-05 15:54:48] [Rank 0] step:4741/10000 train_time:211768ms step_avg:44.67ms +[2025-09-05 15:54:48] [Rank 0] step:4741/10000 train_time:211768ms step_avg:44.67ms +[2025-09-05 15:54:49] [Rank 0] step:4761/10000 train_time:212507ms step_avg:44.63ms +[2025-09-05 15:54:49] [Rank 0] step:4761/10000 train_time:212507ms step_avg:44.63ms +[2025-09-05 15:54:50] [Rank 0] step:4781/10000 train_time:213247ms step_avg:44.60ms +[2025-09-05 15:54:50] [Rank 0] step:4781/10000 train_time:213247ms step_avg:44.60ms +[2025-09-05 15:54:50] [Rank 0] step:4801/10000 train_time:213986ms step_avg:44.57ms +[2025-09-05 15:54:50] [Rank 0] step:4801/10000 train_time:213986ms step_avg:44.57ms +[2025-09-05 15:54:51] [Rank 0] step:4821/10000 train_time:214726ms step_avg:44.54ms +[2025-09-05 15:54:51] [Rank 0] step:4821/10000 train_time:214726ms step_avg:44.54ms +[2025-09-05 15:54:52] [Rank 0] step:4841/10000 train_time:215774ms step_avg:44.57ms +[2025-09-05 15:54:52] [Rank 0] step:4841/10000 train_time:215774ms step_avg:44.57ms +[2025-09-05 15:54:53] [Rank 0] step:4861/10000 train_time:216513ms step_avg:44.54ms +[2025-09-05 15:54:53] [Rank 0] step:4861/10000 train_time:216513ms step_avg:44.54ms +[2025-09-05 15:54:54] [Rank 0] step:4881/10000 train_time:217252ms step_avg:44.51ms +[2025-09-05 15:54:54] [Rank 0] step:4881/10000 train_time:217252ms step_avg:44.51ms +[2025-09-05 15:54:54] [Rank 0] step:4901/10000 train_time:217992ms step_avg:44.48ms +[2025-09-05 15:54:54] [Rank 0] step:4901/10000 train_time:217992ms step_avg:44.48ms +[2025-09-05 15:54:55] [Rank 0] step:4921/10000 train_time:218730ms step_avg:44.45ms +[2025-09-05 15:54:55] [Rank 0] step:4921/10000 train_time:218730ms step_avg:44.45ms +[2025-09-05 15:54:56] [Rank 0] step:4941/10000 train_time:219470ms step_avg:44.42ms +[2025-09-05 15:54:56] [Rank 0] step:4941/10000 train_time:219470ms step_avg:44.42ms +[2025-09-05 15:54:57] [Rank 0] step:4961/10000 train_time:220210ms step_avg:44.39ms +[2025-09-05 15:54:57] [Rank 0] step:4961/10000 train_time:220210ms step_avg:44.39ms +[2025-09-05 15:54:57] [Rank 0] step:4981/10000 train_time:220950ms step_avg:44.36ms +[2025-09-05 15:54:57] [Rank 0] step:4981/10000 train_time:220950ms step_avg:44.36ms +[2025-09-05 15:54:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:54:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:54:59] [Rank 0] PRINT: step:5000/10000 train_loss:1.4241 val_loss:1.4118 train_time:221770ms step_avg:44.35ms +[2025-09-05 15:54:59] [Rank 0] PRINT: step:5000/10000 train_loss:1.4241 val_loss:1.4118 train_time:221770ms step_avg:44.35ms +[2025-09-05 15:54:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:54:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:54:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:54:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:56:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:56:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:56:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:56:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:56:20] [Rank 0] Total Loss: 4.1747 +[2025-09-05 15:56:20] [Rank 0] Total Loss: 4.1747 +[2025-09-05 15:56:20] [Rank 0] Total FTA (Unweighted): 0.5344 +[2025-09-05 15:56:20] [Rank 0] Total FTA (Unweighted): 0.5344 +[2025-09-05 15:56:20] [Rank 0] Total FTA (Weighted): 0.5344 +[2025-09-05 15:56:20] [Rank 0] Total FTA (Weighted): 0.5344 +[2025-09-05 15:56:20] [Rank 0] Group 0 Loss: 3.3632 +[2025-09-05 15:56:20] [Rank 0] Group 0 Loss: 3.3632 +[2025-09-05 15:56:20] [Rank 0] Group 1 Loss: 3.2701 +[2025-09-05 15:56:20] [Rank 0] Group 1 Loss: 3.2701 +[2025-09-05 15:56:20] [Rank 0] Group 2 Loss: 3.2659 +[2025-09-05 15:56:20] [Rank 0] Group 2 Loss: 3.2659 +[2025-09-05 15:56:20] [Rank 0] Group 3 Loss: 3.5934 +[2025-09-05 15:56:20] [Rank 0] Group 3 Loss: 3.5934 +[2025-09-05 15:56:20] [Rank 0] Group 4 Loss: 3.6816 +[2025-09-05 15:56:20] [Rank 0] Group 4 Loss: 3.6816 +[2025-09-05 15:56:20] [Rank 0] Group 5 Loss: 3.8824 +[2025-09-05 15:56:20] [Rank 0] Group 5 Loss: 3.8824 +[2025-09-05 15:56:20] [Rank 0] Group 6 Loss: 3.9391 +[2025-09-05 15:56:20] [Rank 0] Group 6 Loss: 3.9391 +[2025-09-05 15:56:20] [Rank 0] Group 7 Loss: 4.1749 +[2025-09-05 15:56:20] [Rank 0] Group 7 Loss: 4.1749 +[2025-09-05 15:56:20] [Rank 0] Group 8 Loss: 4.4291 +[2025-09-05 15:56:20] [Rank 0] Group 8 Loss: 4.4291 +[2025-09-05 15:56:20] [Rank 0] Group 9 Loss: 4.5920 +[2025-09-05 15:56:20] [Rank 0] Group 9 Loss: 4.5920 +[2025-09-05 15:56:20] [Rank 0] Group 10 Loss: 4.7143 +[2025-09-05 15:56:20] [Rank 0] Group 10 Loss: 4.7143 +[2025-09-05 15:56:20] [Rank 0] Group 11 Loss: 4.7065 +[2025-09-05 15:56:20] [Rank 0] Group 11 Loss: 4.7065 +[2025-09-05 15:56:20] [Rank 0] Group 12 Loss: 4.7435 +[2025-09-05 15:56:20] [Rank 0] Group 12 Loss: 4.7435 +[2025-09-05 15:56:20] [Rank 0] Group 13 Loss: 4.8040 +[2025-09-05 15:56:20] [Rank 0] Group 13 Loss: 4.8040 +[2025-09-05 15:56:20] [Rank 0] Group 14 Loss: 4.7815 +[2025-09-05 15:56:20] [Rank 0] Group 14 Loss: 4.7815 +[2025-09-05 15:56:20] [Rank 0] Group 15 Loss: 4.8531 +[2025-09-05 15:56:20] [Rank 0] Group 15 Loss: 4.8531 +[2025-09-05 15:56:20] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:56:20] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:56:20] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:56:20] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:56:20] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:56:20] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:56:20] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:56:20] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:56:20] [Rank 0] Group 4 FTA: 0.8500 +[2025-09-05 15:56:20] [Rank 0] Group 4 FTA: 0.8500 +[2025-09-05 15:56:20] [Rank 0] Group 5 FTA: 0.5900 +[2025-09-05 15:56:20] [Rank 0] Group 5 FTA: 
0.5900 +[2025-09-05 15:56:20] [Rank 0] Group 6 FTA: 0.4800 +[2025-09-05 15:56:20] [Rank 0] Group 6 FTA: 0.4800 +[2025-09-05 15:56:20] [Rank 0] Group 7 FTA: 0.4200 +[2025-09-05 15:56:20] [Rank 0] Group 7 FTA: 0.4200 +[2025-09-05 15:56:20] [Rank 0] Group 8 FTA: 0.5000 +[2025-09-05 15:56:20] [Rank 0] Group 8 FTA: 0.5000 +[2025-09-05 15:56:20] [Rank 0] Group 9 FTA: 0.3900 +[2025-09-05 15:56:20] [Rank 0] Group 9 FTA: 0.3900 +[2025-09-05 15:56:20] [Rank 0] Group 10 FTA: 0.4400 +[2025-09-05 15:56:20] [Rank 0] Group 10 FTA: 0.4400 +[2025-09-05 15:56:20] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 15:56:20] [Rank 0] Group 11 FTA: 0.3100 +[2025-09-05 15:56:20] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 15:56:20] [Rank 0] Group 12 FTA: 0.2000 +[2025-09-05 15:56:20] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 15:56:20] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 15:56:20] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 15:56:20] [Rank 0] Group 14 FTA: 0.1300 +[2025-09-05 15:56:20] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 15:56:20] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 15:56:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:56:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:56:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:56:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:56:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:56:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:56:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:56:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:56:22] [Rank 0] step:5001/10000 train_time:221779ms step_avg:44.35ms +[2025-09-05 15:56:22] [Rank 0] step:5001/10000 train_time:221779ms step_avg:44.35ms +[2025-09-05 15:56:23] [Rank 0] step:5021/10000 train_time:222460ms step_avg:44.31ms +[2025-09-05 15:56:23] [Rank 0] step:5021/10000 train_time:222460ms step_avg:44.31ms +[2025-09-05 15:56:23] [Rank 0] step:5041/10000 train_time:223199ms step_avg:44.28ms +[2025-09-05 15:56:23] [Rank 0] step:5041/10000 train_time:223199ms step_avg:44.28ms +[2025-09-05 15:56:24] [Rank 0] step:5061/10000 train_time:223938ms step_avg:44.25ms +[2025-09-05 15:56:24] [Rank 0] step:5061/10000 train_time:223938ms step_avg:44.25ms +[2025-09-05 15:56:25] [Rank 0] step:5081/10000 train_time:224677ms step_avg:44.22ms +[2025-09-05 15:56:25] [Rank 0] step:5081/10000 train_time:224677ms step_avg:44.22ms +[2025-09-05 15:56:26] [Rank 0] step:5101/10000 train_time:225417ms step_avg:44.19ms +[2025-09-05 15:56:26] [Rank 0] step:5101/10000 train_time:225417ms step_avg:44.19ms +[2025-09-05 15:56:26] [Rank 0] step:5121/10000 train_time:226157ms step_avg:44.16ms +[2025-09-05 15:56:26] 
[Rank 0] step:5121/10000 train_time:226157ms step_avg:44.16ms +[2025-09-05 15:56:27] [Rank 0] step:5141/10000 train_time:226896ms step_avg:44.13ms +[2025-09-05 15:56:27] [Rank 0] step:5141/10000 train_time:226896ms step_avg:44.13ms +[2025-09-05 15:56:28] [Rank 0] step:5161/10000 train_time:227635ms step_avg:44.11ms +[2025-09-05 15:56:28] [Rank 0] step:5161/10000 train_time:227635ms step_avg:44.11ms +[2025-09-05 15:56:28] [Rank 0] step:5181/10000 train_time:228375ms step_avg:44.08ms +[2025-09-05 15:56:28] [Rank 0] step:5181/10000 train_time:228375ms step_avg:44.08ms +[2025-09-05 15:56:29] [Rank 0] step:5201/10000 train_time:229115ms step_avg:44.05ms +[2025-09-05 15:56:29] [Rank 0] step:5201/10000 train_time:229115ms step_avg:44.05ms +[2025-09-05 15:56:30] [Rank 0] step:5221/10000 train_time:229854ms step_avg:44.02ms +[2025-09-05 15:56:30] [Rank 0] step:5221/10000 train_time:229854ms step_avg:44.02ms +[2025-09-05 15:56:31] [Rank 0] step:5241/10000 train_time:230593ms step_avg:44.00ms +[2025-09-05 15:56:31] [Rank 0] step:5241/10000 train_time:230593ms step_avg:44.00ms +[2025-09-05 15:56:31] [Rank 0] step:5261/10000 train_time:231333ms step_avg:43.97ms +[2025-09-05 15:56:31] [Rank 0] step:5261/10000 train_time:231333ms step_avg:43.97ms +[2025-09-05 15:56:32] [Rank 0] step:5281/10000 train_time:232073ms step_avg:43.94ms +[2025-09-05 15:56:32] [Rank 0] step:5281/10000 train_time:232073ms step_avg:43.94ms +[2025-09-05 15:56:33] [Rank 0] step:5301/10000 train_time:232812ms step_avg:43.92ms +[2025-09-05 15:56:33] [Rank 0] step:5301/10000 train_time:232812ms step_avg:43.92ms +[2025-09-05 15:56:34] [Rank 0] step:5321/10000 train_time:233551ms step_avg:43.89ms +[2025-09-05 15:56:34] [Rank 0] step:5321/10000 train_time:233551ms step_avg:43.89ms +[2025-09-05 15:56:34] [Rank 0] step:5341/10000 train_time:234292ms step_avg:43.87ms +[2025-09-05 15:56:34] [Rank 0] step:5341/10000 train_time:234292ms step_avg:43.87ms +[2025-09-05 15:56:35] [Rank 0] step:5361/10000 train_time:235031ms step_avg:43.84ms +[2025-09-05 15:56:35] [Rank 0] step:5361/10000 train_time:235031ms step_avg:43.84ms +[2025-09-05 15:56:36] [Rank 0] step:5381/10000 train_time:235772ms step_avg:43.82ms +[2025-09-05 15:56:36] [Rank 0] step:5381/10000 train_time:235772ms step_avg:43.82ms +[2025-09-05 15:56:37] [Rank 0] step:5401/10000 train_time:236511ms step_avg:43.79ms +[2025-09-05 15:56:37] [Rank 0] step:5401/10000 train_time:236511ms step_avg:43.79ms +[2025-09-05 15:56:37] [Rank 0] step:5421/10000 train_time:237251ms step_avg:43.77ms +[2025-09-05 15:56:37] [Rank 0] step:5421/10000 train_time:237251ms step_avg:43.77ms +[2025-09-05 15:56:38] [Rank 0] step:5441/10000 train_time:237991ms step_avg:43.74ms +[2025-09-05 15:56:38] [Rank 0] step:5441/10000 train_time:237991ms step_avg:43.74ms +[2025-09-05 15:56:39] [Rank 0] step:5461/10000 train_time:238731ms step_avg:43.72ms +[2025-09-05 15:56:39] [Rank 0] step:5461/10000 train_time:238731ms step_avg:43.72ms +[2025-09-05 15:56:40] [Rank 0] step:5481/10000 train_time:239471ms step_avg:43.69ms +[2025-09-05 15:56:40] [Rank 0] step:5481/10000 train_time:239471ms step_avg:43.69ms +[2025-09-05 15:56:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:56:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:56:41] [Rank 0] PRINT: step:5500/10000 train_loss:1.4197 val_loss:1.4098 train_time:240291ms step_avg:43.69ms +[2025-09-05 15:56:41] [Rank 0] PRINT: step:5500/10000 train_loss:1.4197 val_loss:1.4098 train_time:240291ms step_avg:43.69ms +[2025-09-05 15:56:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:56:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:56:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:56:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:58:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:58:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:58:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:58:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:58:02] [Rank 0] Total Loss: 4.1342 +[2025-09-05 15:58:02] [Rank 0] Total Loss: 4.1342 +[2025-09-05 15:58:02] [Rank 0] Total FTA (Unweighted): 0.5419 +[2025-09-05 15:58:02] [Rank 0] Total FTA (Unweighted): 0.5419 +[2025-09-05 15:58:02] [Rank 0] Total FTA (Weighted): 0.5419 +[2025-09-05 15:58:02] [Rank 0] Total FTA (Weighted): 0.5419 +[2025-09-05 15:58:02] [Rank 0] Group 0 Loss: 3.4052 +[2025-09-05 15:58:02] [Rank 0] Group 0 Loss: 3.4052 +[2025-09-05 15:58:02] [Rank 0] Group 1 Loss: 3.1443 +[2025-09-05 15:58:02] [Rank 0] Group 1 Loss: 3.1443 +[2025-09-05 15:58:02] [Rank 0] Group 2 Loss: 3.1878 +[2025-09-05 15:58:02] [Rank 0] Group 2 Loss: 3.1878 +[2025-09-05 15:58:02] [Rank 0] Group 3 Loss: 3.4713 +[2025-09-05 15:58:02] [Rank 0] Group 3 Loss: 3.4713 +[2025-09-05 15:58:02] [Rank 0] Group 4 Loss: 3.6949 +[2025-09-05 15:58:02] [Rank 0] Group 4 Loss: 3.6949 +[2025-09-05 15:58:02] [Rank 0] Group 5 Loss: 3.8176 +[2025-09-05 15:58:02] [Rank 0] Group 5 Loss: 3.8176 +[2025-09-05 15:58:02] [Rank 0] Group 6 Loss: 3.9155 +[2025-09-05 15:58:02] [Rank 0] Group 6 Loss: 3.9155 +[2025-09-05 15:58:02] [Rank 0] Group 7 Loss: 4.1398 +[2025-09-05 15:58:02] [Rank 0] Group 7 Loss: 4.1398 +[2025-09-05 15:58:02] [Rank 0] Group 8 Loss: 4.3625 +[2025-09-05 15:58:02] [Rank 0] Group 8 Loss: 4.3625 +[2025-09-05 15:58:02] [Rank 0] Group 9 Loss: 4.5065 +[2025-09-05 15:58:02] [Rank 0] Group 9 Loss: 4.5065 +[2025-09-05 15:58:02] [Rank 0] Group 10 Loss: 4.6794 +[2025-09-05 15:58:02] [Rank 0] Group 10 Loss: 4.6794 +[2025-09-05 15:58:02] [Rank 0] Group 11 Loss: 4.7474 +[2025-09-05 15:58:02] [Rank 0] Group 11 Loss: 4.7474 +[2025-09-05 15:58:02] [Rank 0] Group 12 Loss: 4.7138 +[2025-09-05 15:58:02] [Rank 0] Group 12 Loss: 4.7138 +[2025-09-05 15:58:02] [Rank 0] Group 13 Loss: 4.7769 +[2025-09-05 15:58:02] [Rank 0] Group 13 Loss: 4.7769 +[2025-09-05 15:58:02] [Rank 0] Group 14 Loss: 4.7641 +[2025-09-05 15:58:02] [Rank 0] Group 14 Loss: 4.7641 +[2025-09-05 15:58:02] [Rank 0] Group 15 Loss: 4.8195 +[2025-09-05 15:58:02] [Rank 0] Group 15 Loss: 4.8195 +[2025-09-05 15:58:02] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:58:02] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:58:02] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:58:02] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:58:02] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:58:02] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:58:02] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:58:02] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:58:02] [Rank 0] Group 4 FTA: 0.9000 +[2025-09-05 15:58:02] [Rank 0] Group 4 FTA: 0.9000 +[2025-09-05 15:58:02] [Rank 0] Group 5 FTA: 0.5800 +[2025-09-05 15:58:02] [Rank 0] Group 5 FTA: 
0.5800 +[2025-09-05 15:58:02] [Rank 0] Group 6 FTA: 0.5100 +[2025-09-05 15:58:02] [Rank 0] Group 6 FTA: 0.5100 +[2025-09-05 15:58:02] [Rank 0] Group 7 FTA: 0.4600 +[2025-09-05 15:58:02] [Rank 0] Group 7 FTA: 0.4600 +[2025-09-05 15:58:02] [Rank 0] Group 8 FTA: 0.4900 +[2025-09-05 15:58:02] [Rank 0] Group 8 FTA: 0.4900 +[2025-09-05 15:58:02] [Rank 0] Group 9 FTA: 0.3800 +[2025-09-05 15:58:02] [Rank 0] Group 9 FTA: 0.3800 +[2025-09-05 15:58:02] [Rank 0] Group 10 FTA: 0.4600 +[2025-09-05 15:58:02] [Rank 0] Group 10 FTA: 0.4600 +[2025-09-05 15:58:02] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 15:58:02] [Rank 0] Group 11 FTA: 0.3200 +[2025-09-05 15:58:02] [Rank 0] Group 12 FTA: 0.2100 +[2025-09-05 15:58:02] [Rank 0] Group 12 FTA: 0.2100 +[2025-09-05 15:58:02] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 15:58:02] [Rank 0] Group 13 FTA: 0.1500 +[2025-09-05 15:58:02] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 15:58:02] [Rank 0] Group 14 FTA: 0.1500 +[2025-09-05 15:58:02] [Rank 0] Group 15 FTA: 0.0600 +[2025-09-05 15:58:02] [Rank 0] Group 15 FTA: 0.0600 +[2025-09-05 15:58:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:58:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:58:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:58:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:58:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:58:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:58:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:58:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:58:03] [Rank 0] step:5501/10000 train_time:240300ms step_avg:43.68ms +[2025-09-05 15:58:03] [Rank 0] step:5501/10000 train_time:240300ms step_avg:43.68ms +[2025-09-05 15:58:04] [Rank 0] step:5521/10000 train_time:240982ms step_avg:43.65ms +[2025-09-05 15:58:04] [Rank 0] step:5521/10000 train_time:240982ms step_avg:43.65ms +[2025-09-05 15:58:05] [Rank 0] step:5541/10000 train_time:241722ms step_avg:43.62ms +[2025-09-05 15:58:05] [Rank 0] step:5541/10000 train_time:241722ms step_avg:43.62ms +[2025-09-05 15:58:05] [Rank 0] step:5561/10000 train_time:242461ms step_avg:43.60ms +[2025-09-05 15:58:05] [Rank 0] step:5561/10000 train_time:242461ms step_avg:43.60ms +[2025-09-05 15:58:06] [Rank 0] step:5581/10000 train_time:243201ms step_avg:43.58ms +[2025-09-05 15:58:06] [Rank 0] step:5581/10000 train_time:243201ms step_avg:43.58ms +[2025-09-05 15:58:07] [Rank 0] step:5601/10000 train_time:243940ms step_avg:43.55ms +[2025-09-05 15:58:07] [Rank 0] step:5601/10000 train_time:243940ms step_avg:43.55ms +[2025-09-05 15:58:08] [Rank 0] step:5621/10000 train_time:244680ms step_avg:43.53ms +[2025-09-05 15:58:08] 
[Rank 0] step:5621/10000 train_time:244680ms step_avg:43.53ms +[2025-09-05 15:58:09] [Rank 0] step:5641/10000 train_time:246020ms step_avg:43.61ms +[2025-09-05 15:58:09] [Rank 0] step:5641/10000 train_time:246020ms step_avg:43.61ms +[2025-09-05 15:58:10] [Rank 0] step:5661/10000 train_time:246760ms step_avg:43.59ms +[2025-09-05 15:58:10] [Rank 0] step:5661/10000 train_time:246760ms step_avg:43.59ms +[2025-09-05 15:58:10] [Rank 0] step:5681/10000 train_time:247500ms step_avg:43.57ms +[2025-09-05 15:58:10] [Rank 0] step:5681/10000 train_time:247500ms step_avg:43.57ms +[2025-09-05 15:58:11] [Rank 0] step:5701/10000 train_time:248240ms step_avg:43.54ms +[2025-09-05 15:58:11] [Rank 0] step:5701/10000 train_time:248240ms step_avg:43.54ms +[2025-09-05 15:58:12] [Rank 0] step:5721/10000 train_time:248979ms step_avg:43.52ms +[2025-09-05 15:58:12] [Rank 0] step:5721/10000 train_time:248979ms step_avg:43.52ms +[2025-09-05 15:58:13] [Rank 0] step:5741/10000 train_time:249719ms step_avg:43.50ms +[2025-09-05 15:58:13] [Rank 0] step:5741/10000 train_time:249719ms step_avg:43.50ms +[2025-09-05 15:58:13] [Rank 0] step:5761/10000 train_time:250577ms step_avg:43.50ms +[2025-09-05 15:58:13] [Rank 0] step:5761/10000 train_time:250577ms step_avg:43.50ms +[2025-09-05 15:58:14] [Rank 0] step:5781/10000 train_time:251317ms step_avg:43.47ms +[2025-09-05 15:58:14] [Rank 0] step:5781/10000 train_time:251317ms step_avg:43.47ms +[2025-09-05 15:58:15] [Rank 0] step:5801/10000 train_time:252055ms step_avg:43.45ms +[2025-09-05 15:58:15] [Rank 0] step:5801/10000 train_time:252055ms step_avg:43.45ms +[2025-09-05 15:58:16] [Rank 0] step:5821/10000 train_time:252931ms step_avg:43.45ms +[2025-09-05 15:58:16] [Rank 0] step:5821/10000 train_time:252931ms step_avg:43.45ms +[2025-09-05 15:58:17] [Rank 0] step:5841/10000 train_time:253671ms step_avg:43.43ms +[2025-09-05 15:58:17] [Rank 0] step:5841/10000 train_time:253671ms step_avg:43.43ms +[2025-09-05 15:58:17] [Rank 0] step:5861/10000 train_time:254411ms step_avg:43.41ms +[2025-09-05 15:58:17] [Rank 0] step:5861/10000 train_time:254411ms step_avg:43.41ms +[2025-09-05 15:58:18] [Rank 0] step:5881/10000 train_time:255154ms step_avg:43.39ms +[2025-09-05 15:58:18] [Rank 0] step:5881/10000 train_time:255154ms step_avg:43.39ms +[2025-09-05 15:58:19] [Rank 0] step:5901/10000 train_time:255893ms step_avg:43.36ms +[2025-09-05 15:58:19] [Rank 0] step:5901/10000 train_time:255893ms step_avg:43.36ms +[2025-09-05 15:58:20] [Rank 0] step:5921/10000 train_time:256632ms step_avg:43.34ms +[2025-09-05 15:58:20] [Rank 0] step:5921/10000 train_time:256632ms step_avg:43.34ms +[2025-09-05 15:58:20] [Rank 0] step:5941/10000 train_time:257371ms step_avg:43.32ms +[2025-09-05 15:58:20] [Rank 0] step:5941/10000 train_time:257371ms step_avg:43.32ms +[2025-09-05 15:58:21] [Rank 0] step:5961/10000 train_time:258111ms step_avg:43.30ms +[2025-09-05 15:58:21] [Rank 0] step:5961/10000 train_time:258111ms step_avg:43.30ms +[2025-09-05 15:58:22] [Rank 0] step:5981/10000 train_time:258850ms step_avg:43.28ms +[2025-09-05 15:58:22] [Rank 0] step:5981/10000 train_time:258850ms step_avg:43.28ms +[2025-09-05 15:58:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 15:58:22] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 15:58:23] [Rank 0] PRINT: step:6000/10000 train_loss:1.4188 val_loss:1.4127 train_time:259670ms step_avg:43.28ms +[2025-09-05 15:58:23] [Rank 0] PRINT: step:6000/10000 train_loss:1.4188 val_loss:1.4127 train_time:259670ms step_avg:43.28ms +[2025-09-05 15:58:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:58:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 15:58:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:58:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 15:59:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:59:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 15:59:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:59:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 15:59:44] [Rank 0] Total Loss: 4.0469 +[2025-09-05 15:59:44] [Rank 0] Total Loss: 4.0469 +[2025-09-05 15:59:44] [Rank 0] Total FTA (Unweighted): 0.5487 +[2025-09-05 15:59:44] [Rank 0] Total FTA (Unweighted): 0.5487 +[2025-09-05 15:59:44] [Rank 0] Total FTA (Weighted): 0.5487 +[2025-09-05 15:59:44] [Rank 0] Total FTA (Weighted): 0.5487 +[2025-09-05 15:59:44] [Rank 0] Group 0 Loss: 3.4319 +[2025-09-05 15:59:44] [Rank 0] Group 0 Loss: 3.4319 +[2025-09-05 15:59:44] [Rank 0] Group 1 Loss: 3.0414 +[2025-09-05 15:59:44] [Rank 0] Group 1 Loss: 3.0414 +[2025-09-05 15:59:44] [Rank 0] Group 2 Loss: 3.0669 +[2025-09-05 15:59:44] [Rank 0] Group 2 Loss: 3.0669 +[2025-09-05 15:59:44] [Rank 0] Group 3 Loss: 3.4537 +[2025-09-05 15:59:44] [Rank 0] Group 3 Loss: 3.4537 +[2025-09-05 15:59:44] [Rank 0] Group 4 Loss: 3.5820 +[2025-09-05 15:59:44] [Rank 0] Group 4 Loss: 3.5820 +[2025-09-05 15:59:44] [Rank 0] Group 5 Loss: 3.7740 +[2025-09-05 15:59:44] [Rank 0] Group 5 Loss: 3.7740 +[2025-09-05 15:59:44] [Rank 0] Group 6 Loss: 3.7918 +[2025-09-05 15:59:44] [Rank 0] Group 6 Loss: 3.7918 +[2025-09-05 15:59:44] [Rank 0] Group 7 Loss: 4.0346 +[2025-09-05 15:59:44] [Rank 0] Group 7 Loss: 4.0346 +[2025-09-05 15:59:44] [Rank 0] Group 8 Loss: 4.3217 +[2025-09-05 15:59:44] [Rank 0] Group 8 Loss: 4.3217 +[2025-09-05 15:59:44] [Rank 0] Group 9 Loss: 4.4513 +[2025-09-05 15:59:44] [Rank 0] Group 9 Loss: 4.4513 +[2025-09-05 15:59:44] [Rank 0] Group 10 Loss: 4.5948 +[2025-09-05 15:59:44] [Rank 0] Group 10 Loss: 4.5948 +[2025-09-05 15:59:44] [Rank 0] Group 11 Loss: 4.5737 +[2025-09-05 15:59:44] [Rank 0] Group 11 Loss: 4.5737 +[2025-09-05 15:59:44] [Rank 0] Group 12 Loss: 4.5855 +[2025-09-05 15:59:44] [Rank 0] Group 12 Loss: 4.5855 +[2025-09-05 15:59:44] [Rank 0] Group 13 Loss: 4.6590 +[2025-09-05 15:59:44] [Rank 0] Group 13 Loss: 4.6590 +[2025-09-05 15:59:44] [Rank 0] Group 14 Loss: 4.6801 +[2025-09-05 15:59:44] [Rank 0] Group 14 Loss: 4.6801 +[2025-09-05 15:59:44] [Rank 0] Group 15 Loss: 4.7082 +[2025-09-05 15:59:44] [Rank 0] Group 15 Loss: 4.7082 +[2025-09-05 15:59:44] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:59:44] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 15:59:44] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:59:44] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 15:59:44] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:59:44] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 15:59:44] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:59:44] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 15:59:44] [Rank 0] Group 4 FTA: 0.8700 +[2025-09-05 15:59:44] [Rank 0] Group 4 FTA: 0.8700 +[2025-09-05 15:59:44] [Rank 0] Group 5 FTA: 0.5700 +[2025-09-05 15:59:44] [Rank 0] Group 5 FTA: 
0.5700 +[2025-09-05 15:59:44] [Rank 0] Group 6 FTA: 0.5200 +[2025-09-05 15:59:44] [Rank 0] Group 6 FTA: 0.5200 +[2025-09-05 15:59:44] [Rank 0] Group 7 FTA: 0.4600 +[2025-09-05 15:59:44] [Rank 0] Group 7 FTA: 0.4600 +[2025-09-05 15:59:44] [Rank 0] Group 8 FTA: 0.4700 +[2025-09-05 15:59:44] [Rank 0] Group 8 FTA: 0.4700 +[2025-09-05 15:59:44] [Rank 0] Group 9 FTA: 0.3900 +[2025-09-05 15:59:44] [Rank 0] Group 9 FTA: 0.3900 +[2025-09-05 15:59:44] [Rank 0] Group 10 FTA: 0.4700 +[2025-09-05 15:59:44] [Rank 0] Group 10 FTA: 0.4700 +[2025-09-05 15:59:44] [Rank 0] Group 11 FTA: 0.3700 +[2025-09-05 15:59:44] [Rank 0] Group 11 FTA: 0.3700 +[2025-09-05 15:59:44] [Rank 0] Group 12 FTA: 0.2500 +[2025-09-05 15:59:44] [Rank 0] Group 12 FTA: 0.2500 +[2025-09-05 15:59:44] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 15:59:44] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 15:59:44] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 15:59:44] [Rank 0] Group 14 FTA: 0.1700 +[2025-09-05 15:59:44] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 15:59:44] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 15:59:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:59:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png +[2025-09-05 15:59:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:59:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png +[2025-09-05 15:59:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:59:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png +[2025-09-05 15:59:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:59:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png +[2025-09-05 15:59:46] [Rank 0] step:6001/10000 train_time:259679ms step_avg:43.27ms +[2025-09-05 15:59:46] [Rank 0] step:6001/10000 train_time:259679ms step_avg:43.27ms +[2025-09-05 15:59:47] [Rank 0] step:6021/10000 train_time:260967ms step_avg:43.34ms +[2025-09-05 15:59:47] [Rank 0] step:6021/10000 train_time:260967ms step_avg:43.34ms +[2025-09-05 15:59:48] [Rank 0] step:6041/10000 train_time:261706ms step_avg:43.32ms +[2025-09-05 15:59:48] [Rank 0] step:6041/10000 train_time:261706ms step_avg:43.32ms +[2025-09-05 15:59:49] [Rank 0] step:6061/10000 train_time:262445ms step_avg:43.30ms +[2025-09-05 15:59:49] [Rank 0] step:6061/10000 train_time:262445ms step_avg:43.30ms +[2025-09-05 15:59:49] [Rank 0] step:6081/10000 train_time:263184ms step_avg:43.28ms +[2025-09-05 15:59:49] [Rank 0] step:6081/10000 train_time:263184ms step_avg:43.28ms +[2025-09-05 15:59:50] [Rank 0] step:6101/10000 train_time:263923ms step_avg:43.26ms +[2025-09-05 15:59:50] [Rank 0] step:6101/10000 train_time:263923ms step_avg:43.26ms +[2025-09-05 15:59:51] [Rank 0] step:6121/10000 train_time:264664ms step_avg:43.24ms +[2025-09-05 15:59:51] 
[Rank 0] step:6121/10000 train_time:264664ms step_avg:43.24ms +[2025-09-05 15:59:52] [Rank 0] step:6141/10000 train_time:265403ms step_avg:43.22ms +[2025-09-05 15:59:52] [Rank 0] step:6141/10000 train_time:265403ms step_avg:43.22ms +[2025-09-05 15:59:52] [Rank 0] step:6161/10000 train_time:266144ms step_avg:43.20ms +[2025-09-05 15:59:52] [Rank 0] step:6161/10000 train_time:266144ms step_avg:43.20ms +[2025-09-05 15:59:53] [Rank 0] step:6181/10000 train_time:266883ms step_avg:43.18ms +[2025-09-05 15:59:53] [Rank 0] step:6181/10000 train_time:266883ms step_avg:43.18ms +[2025-09-05 15:59:54] [Rank 0] step:6201/10000 train_time:267623ms step_avg:43.16ms +[2025-09-05 15:59:54] [Rank 0] step:6201/10000 train_time:267623ms step_avg:43.16ms +[2025-09-05 15:59:55] [Rank 0] step:6221/10000 train_time:268362ms step_avg:43.14ms +[2025-09-05 15:59:55] [Rank 0] step:6221/10000 train_time:268362ms step_avg:43.14ms +[2025-09-05 15:59:55] [Rank 0] step:6241/10000 train_time:269101ms step_avg:43.12ms +[2025-09-05 15:59:55] [Rank 0] step:6241/10000 train_time:269101ms step_avg:43.12ms +[2025-09-05 15:59:56] [Rank 0] step:6261/10000 train_time:269841ms step_avg:43.10ms +[2025-09-05 15:59:56] [Rank 0] step:6261/10000 train_time:269841ms step_avg:43.10ms +[2025-09-05 15:59:57] [Rank 0] step:6281/10000 train_time:270580ms step_avg:43.08ms +[2025-09-05 15:59:57] [Rank 0] step:6281/10000 train_time:270580ms step_avg:43.08ms +[2025-09-05 15:59:58] [Rank 0] step:6301/10000 train_time:271318ms step_avg:43.06ms +[2025-09-05 15:59:58] [Rank 0] step:6301/10000 train_time:271318ms step_avg:43.06ms +[2025-09-05 15:59:58] [Rank 0] step:6321/10000 train_time:272057ms step_avg:43.04ms +[2025-09-05 15:59:58] [Rank 0] step:6321/10000 train_time:272057ms step_avg:43.04ms +[2025-09-05 15:59:59] [Rank 0] step:6341/10000 train_time:272796ms step_avg:43.02ms +[2025-09-05 15:59:59] [Rank 0] step:6341/10000 train_time:272796ms step_avg:43.02ms +[2025-09-05 16:00:00] [Rank 0] step:6361/10000 train_time:273536ms step_avg:43.00ms +[2025-09-05 16:00:00] [Rank 0] step:6361/10000 train_time:273536ms step_avg:43.00ms +[2025-09-05 16:00:00] [Rank 0] step:6381/10000 train_time:274274ms step_avg:42.98ms +[2025-09-05 16:00:00] [Rank 0] step:6381/10000 train_time:274274ms step_avg:42.98ms +[2025-09-05 16:00:01] [Rank 0] step:6401/10000 train_time:275014ms step_avg:42.96ms +[2025-09-05 16:00:01] [Rank 0] step:6401/10000 train_time:275014ms step_avg:42.96ms +[2025-09-05 16:00:02] [Rank 0] step:6421/10000 train_time:275755ms step_avg:42.95ms +[2025-09-05 16:00:02] [Rank 0] step:6421/10000 train_time:275755ms step_avg:42.95ms +[2025-09-05 16:00:03] [Rank 0] step:6441/10000 train_time:276500ms step_avg:42.93ms +[2025-09-05 16:00:03] [Rank 0] step:6441/10000 train_time:276500ms step_avg:42.93ms +[2025-09-05 16:00:03] [Rank 0] step:6461/10000 train_time:277240ms step_avg:42.91ms +[2025-09-05 16:00:03] [Rank 0] step:6461/10000 train_time:277240ms step_avg:42.91ms +[2025-09-05 16:00:04] [Rank 0] step:6481/10000 train_time:277980ms step_avg:42.89ms +[2025-09-05 16:00:04] [Rank 0] step:6481/10000 train_time:277980ms step_avg:42.89ms +[2025-09-05 16:00:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 16:00:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 16:00:05] [Rank 0] PRINT: step:6500/10000 train_loss:1.4181 val_loss:1.4084 train_time:278801ms step_avg:42.89ms +[2025-09-05 16:00:05] [Rank 0] PRINT: step:6500/10000 train_loss:1.4181 val_loss:1.4084 train_time:278801ms step_avg:42.89ms +[2025-09-05 16:00:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:00:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:00:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:00:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:01:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 16:01:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 16:01:27] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 16:01:27] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 16:01:27] [Rank 0] Total Loss: 4.2137 +[2025-09-05 16:01:27] [Rank 0] Total Loss: 4.2137 +[2025-09-05 16:01:27] [Rank 0] Total FTA (Unweighted): 0.5494 +[2025-09-05 16:01:27] [Rank 0] Total FTA (Unweighted): 0.5494 +[2025-09-05 16:01:27] [Rank 0] Total FTA (Weighted): 0.5494 +[2025-09-05 16:01:27] [Rank 0] Total FTA (Weighted): 0.5494 +[2025-09-05 16:01:27] [Rank 0] Group 0 Loss: 3.4286 +[2025-09-05 16:01:27] [Rank 0] Group 0 Loss: 3.4286 +[2025-09-05 16:01:27] [Rank 0] Group 1 Loss: 3.3097 +[2025-09-05 16:01:27] [Rank 0] Group 1 Loss: 3.3097 +[2025-09-05 16:01:27] [Rank 0] Group 2 Loss: 3.2933 +[2025-09-05 16:01:27] [Rank 0] Group 2 Loss: 3.2933 +[2025-09-05 16:01:27] [Rank 0] Group 3 Loss: 3.6192 +[2025-09-05 16:01:27] [Rank 0] Group 3 Loss: 3.6192 +[2025-09-05 16:01:27] [Rank 0] Group 4 Loss: 3.7360 +[2025-09-05 16:01:27] [Rank 0] Group 4 Loss: 3.7360 +[2025-09-05 16:01:27] [Rank 0] Group 5 Loss: 3.9217 +[2025-09-05 16:01:27] [Rank 0] Group 5 Loss: 3.9217 +[2025-09-05 16:01:27] [Rank 0] Group 6 Loss: 4.0109 +[2025-09-05 16:01:27] [Rank 0] Group 6 Loss: 4.0109 +[2025-09-05 16:01:27] [Rank 0] Group 7 Loss: 4.2304 +[2025-09-05 16:01:27] [Rank 0] Group 7 Loss: 4.2304 +[2025-09-05 16:01:27] [Rank 0] Group 8 Loss: 4.4649 +[2025-09-05 16:01:27] [Rank 0] Group 8 Loss: 4.4649 +[2025-09-05 16:01:27] [Rank 0] Group 9 Loss: 4.5942 +[2025-09-05 16:01:27] [Rank 0] Group 9 Loss: 4.5942 +[2025-09-05 16:01:27] [Rank 0] Group 10 Loss: 4.7505 +[2025-09-05 16:01:27] [Rank 0] Group 10 Loss: 4.7505 +[2025-09-05 16:01:27] [Rank 0] Group 11 Loss: 4.7639 +[2025-09-05 16:01:27] [Rank 0] Group 11 Loss: 4.7639 +[2025-09-05 16:01:27] [Rank 0] Group 12 Loss: 4.7381 +[2025-09-05 16:01:27] [Rank 0] Group 12 Loss: 4.7381 +[2025-09-05 16:01:27] [Rank 0] Group 13 Loss: 4.8451 +[2025-09-05 16:01:27] [Rank 0] Group 13 Loss: 4.8451 +[2025-09-05 16:01:27] [Rank 0] Group 14 Loss: 4.8430 +[2025-09-05 16:01:27] [Rank 0] Group 14 Loss: 4.8430 +[2025-09-05 16:01:27] [Rank 0] Group 15 Loss: 4.8690 +[2025-09-05 16:01:27] [Rank 0] Group 15 Loss: 4.8690 +[2025-09-05 16:01:27] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 16:01:27] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 16:01:27] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 16:01:27] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 16:01:27] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 16:01:27] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 16:01:27] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 16:01:27] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 16:01:27] [Rank 0] Group 4 FTA: 0.8900 +[2025-09-05 16:01:27] [Rank 0] Group 4 FTA: 0.8900 +[2025-09-05 16:01:27] [Rank 0] Group 5 FTA: 0.5900 +[2025-09-05 16:01:27] [Rank 0] Group 5 FTA: 
+[2025-09-05 16:01:27] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:01:28] [Rank 0] Group 7 FTA: 0.4800
+[2025-09-05 16:01:28] [Rank 0] Group 8 FTA: 0.4800
+[2025-09-05 16:01:28] [Rank 0] Group 9 FTA: 0.4000
+[2025-09-05 16:01:28] [Rank 0] Group 10 FTA: 0.4800
+[2025-09-05 16:01:28] [Rank 0] Group 11 FTA: 0.3800
+[2025-09-05 16:01:28] [Rank 0] Group 12 FTA: 0.2600
+[2025-09-05 16:01:28] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 16:01:28] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:01:28] [Rank 0] Group 15 FTA: 0.0400
+[2025-09-05 16:01:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 16:01:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 16:01:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 16:01:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
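Editor's note on the two FTA totals in each eval block: the unweighted figure is the plain mean of the sixteen per-group accuracies, while the weighted figure presumably scales each group by its sample count. With 1600 fixed-eval samples over 16 groups, an even split of 100 per group would make the two coincide, which is exactly what the log shows at every eval step. A quick check against the step-6500 numbers (a sketch; the actual aggregation code is not part of this log):

```python
# Per-group FTA values logged at step 6500 (groups 0..15).
fta = [1.00, 1.00, 1.00, 1.00, 0.89, 0.59, 0.51, 0.48,
       0.48, 0.40, 0.48, 0.38, 0.26, 0.15, 0.13, 0.04]
# Assumed equal group sizes: 1600 samples / 16 groups = 100 each.
counts = [100] * 16

unweighted = sum(fta) / len(fta)
weighted = sum(f * c for f, c in zip(fta, counts)) / sum(counts)

# Both reproduce the logged 0.5494 (up to rounding of per-group values).
print(round(unweighted, 4), round(weighted, 4))  # 0.5494 0.5494
```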
+[2025-09-05 16:01:29] [Rank 0] step:6501/10000 train_time:278810ms step_avg:42.89ms
+[2025-09-05 16:01:30] [Rank 0] step:6521/10000 train_time:279494ms step_avg:42.86ms
+[2025-09-05 16:01:30] [Rank 0] step:6541/10000 train_time:280245ms step_avg:42.84ms
+[2025-09-05 16:01:31] [Rank 0] step:6561/10000 train_time:280984ms step_avg:42.83ms
+[2025-09-05 16:01:32] [Rank 0] step:6581/10000 train_time:281725ms step_avg:42.81ms
+[2025-09-05 16:01:33] [Rank 0] step:6601/10000 train_time:282463ms step_avg:42.79ms
+[2025-09-05 16:01:33] [Rank 0] step:6621/10000 train_time:283203ms step_avg:42.77ms
+[2025-09-05 16:01:34] [Rank 0] step:6641/10000 train_time:283943ms step_avg:42.76ms
+[2025-09-05 16:01:35] [Rank 0] step:6661/10000 train_time:284682ms step_avg:42.74ms
+[2025-09-05 16:01:36] [Rank 0] step:6681/10000 train_time:285422ms step_avg:42.72ms
+[2025-09-05 16:01:36] [Rank 0] step:6701/10000 train_time:286163ms step_avg:42.70ms
+[2025-09-05 16:01:37] [Rank 0] step:6721/10000 train_time:286904ms step_avg:42.69ms
+[2025-09-05 16:01:38] [Rank 0] step:6741/10000 train_time:287644ms step_avg:42.67ms
+[2025-09-05 16:01:39] [Rank 0] step:6761/10000 train_time:288384ms step_avg:42.65ms
+[2025-09-05 16:01:39] [Rank 0] step:6781/10000 train_time:289124ms step_avg:42.64ms
+[2025-09-05 16:01:40] [Rank 0] step:6801/10000 train_time:289864ms step_avg:42.62ms
+[2025-09-05 16:01:41] [Rank 0] step:6821/10000 train_time:290604ms step_avg:42.60ms
+[2025-09-05 16:01:42] [Rank 0] step:6841/10000 train_time:291948ms step_avg:42.68ms
+[2025-09-05 16:01:43] [Rank 0] step:6861/10000 train_time:292688ms step_avg:42.66ms
+[2025-09-05 16:01:44] [Rank 0] step:6881/10000 train_time:293428ms step_avg:42.64ms
+[2025-09-05 16:01:44] [Rank 0] step:6901/10000 train_time:294168ms step_avg:42.63ms
+[2025-09-05 16:01:45] [Rank 0] step:6921/10000 train_time:294907ms step_avg:42.61ms
+[2025-09-05 16:01:46] [Rank 0] step:6941/10000 train_time:295647ms step_avg:42.59ms
+[2025-09-05 16:01:47] [Rank 0] step:6961/10000 train_time:296387ms step_avg:42.58ms
+[2025-09-05 16:01:47] [Rank 0] step:6981/10000 train_time:297127ms step_avg:42.56ms
+[2025-09-05 16:01:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
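Editor's note: step_avg in these lines is evidently cumulative wall-clock training time divided by the step index; e.g. at step 7000 below, 297948 ms / 7000 ≈ 42.56 ms, matching the logged value. A one-liner to confirm (illustrative only, not the script's code):

```python
# step_avg = cumulative train_time / step index, as reported in the log.
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

# Step 7000 below reports train_time:297948ms and step_avg:42.56ms.
assert round(step_avg_ms(297948, 7000), 2) == 42.56
```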
+[2025-09-05 16:01:49] [Rank 0] PRINT: step:7000/10000 train_loss:1.4163 val_loss:1.4065 train_time:297948ms step_avg:42.56ms
+[2025-09-05 16:01:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:01:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:03:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:03:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:03:10] [Rank 0] Total Loss: 4.1819
+[2025-09-05 16:03:10] [Rank 0] Total FTA (Unweighted): 0.5656
+[2025-09-05 16:03:10] [Rank 0] Total FTA (Weighted): 0.5656
+[2025-09-05 16:03:10] [Rank 0] Group 0 Loss: 3.4450
+[2025-09-05 16:03:10] [Rank 0] Group 1 Loss: 3.2451
+[2025-09-05 16:03:10] [Rank 0] Group 2 Loss: 3.2200
+[2025-09-05 16:03:10] [Rank 0] Group 3 Loss: 3.5877
+[2025-09-05 16:03:10] [Rank 0] Group 4 Loss: 3.7528
+[2025-09-05 16:03:10] [Rank 0] Group 5 Loss: 3.9019
+[2025-09-05 16:03:10] [Rank 0] Group 6 Loss: 3.9420
+[2025-09-05 16:03:10] [Rank 0] Group 7 Loss: 4.1966
+[2025-09-05 16:03:10] [Rank 0] Group 8 Loss: 4.4194
+[2025-09-05 16:03:10] [Rank 0] Group 9 Loss: 4.5684
+[2025-09-05 16:03:10] [Rank 0] Group 10 Loss: 4.7484
+[2025-09-05 16:03:10] [Rank 0] Group 11 Loss: 4.7354
+[2025-09-05 16:03:10] [Rank 0] Group 12 Loss: 4.7301
+[2025-09-05 16:03:10] [Rank 0] Group 13 Loss: 4.7977
+[2025-09-05 16:03:10] [Rank 0] Group 14 Loss: 4.8002
+[2025-09-05 16:03:10] [Rank 0] Group 15 Loss: 4.8191
+[2025-09-05 16:03:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:03:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:03:10] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:03:10] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:03:10] [Rank 0] Group 4 FTA: 0.9700
+[2025-09-05 16:03:10] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 16:03:10] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:03:10] [Rank 0] Group 7 FTA: 0.5100
+[2025-09-05 16:03:10] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 16:03:10] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 16:03:10] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 16:03:10] [Rank 0] Group 11 FTA: 0.3800
+[2025-09-05 16:03:10] [Rank 0] Group 12 FTA: 0.2800
+[2025-09-05 16:03:10] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 16:03:10] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 16:03:10] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 16:03:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 16:03:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 16:03:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 16:03:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
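Editor's note: each detailed eval appends one point per group to a running history and re-renders the four PNGs named above in place. The plotting code is not part of this log; the following matplotlib sketch only illustrates the pattern, with hypothetical names (`history`, `out_dir`):

```python
# Illustrative sketch of the "curve updated and saved" pattern above.
# `history` and `out_dir` are hypothetical; the real script is not shown.
import matplotlib.pyplot as plt

def update_per_class_loss_curves(history: dict[int, list[float]],
                                 steps: list[int], out_dir: str) -> None:
    fig, ax = plt.subplots()
    for group, losses in sorted(history.items()):
        ax.plot(steps, losses, label=f"Group {group}")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval loss")
    ax.legend(fontsize=6, ncol=2)
    # Overwrite in place so the PNG always reflects the latest eval step.
    fig.savefig(f"{out_dir}/per_class_loss_curves.png", dpi=150)
    plt.close(fig)
```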
+[2025-09-05 16:03:11] [Rank 0] step:7001/10000 train_time:297957ms step_avg:42.56ms
+[2025-09-05 16:03:12] [Rank 0] step:7021/10000 train_time:298633ms step_avg:42.53ms
+[2025-09-05 16:03:13] [Rank 0] step:7041/10000 train_time:299372ms step_avg:42.52ms
+[2025-09-05 16:03:14] [Rank 0] step:7061/10000 train_time:300112ms step_avg:42.50ms
+[2025-09-05 16:03:14] [Rank 0] step:7081/10000 train_time:300852ms step_avg:42.49ms
+[2025-09-05 16:03:15] [Rank 0] step:7101/10000 train_time:301592ms step_avg:42.47ms
+[2025-09-05 16:03:16] [Rank 0] step:7121/10000 train_time:302332ms step_avg:42.46ms
+[2025-09-05 16:03:17] [Rank 0] step:7141/10000 train_time:303071ms step_avg:42.44ms
+[2025-09-05 16:03:17] [Rank 0] step:7161/10000 train_time:303812ms step_avg:42.43ms
+[2025-09-05 16:03:18] [Rank 0] step:7181/10000 train_time:304552ms step_avg:42.41ms
+[2025-09-05 16:03:19] [Rank 0] step:7201/10000 train_time:305292ms step_avg:42.40ms
+[2025-09-05 16:03:20] [Rank 0] step:7221/10000 train_time:306032ms step_avg:42.38ms
+[2025-09-05 16:03:20] [Rank 0] step:7241/10000 train_time:306773ms step_avg:42.37ms
+[2025-09-05 16:03:21] [Rank 0] step:7261/10000 train_time:307514ms step_avg:42.35ms
+[2025-09-05 16:03:22] [Rank 0] step:7281/10000 train_time:308253ms step_avg:42.34ms
+[2025-09-05 16:03:23] [Rank 0] step:7301/10000 train_time:308994ms step_avg:42.32ms
+[2025-09-05 16:03:23] [Rank 0] step:7321/10000 train_time:309734ms step_avg:42.31ms
+[2025-09-05 16:03:24] [Rank 0] step:7341/10000 train_time:310475ms step_avg:42.29ms
+[2025-09-05 16:03:25] [Rank 0] step:7361/10000 train_time:311214ms step_avg:42.28ms
+[2025-09-05 16:03:26] [Rank 0] step:7381/10000 train_time:311955ms step_avg:42.26ms
+[2025-09-05 16:03:26] [Rank 0] step:7401/10000 train_time:312694ms step_avg:42.25ms
+[2025-09-05 16:03:27] [Rank 0] step:7421/10000 train_time:313434ms step_avg:42.24ms
+[2025-09-05 16:03:28] [Rank 0] step:7441/10000 train_time:314174ms step_avg:42.22ms
+[2025-09-05 16:03:29] [Rank 0] step:7461/10000 train_time:314915ms step_avg:42.21ms
+[2025-09-05 16:03:29] [Rank 0] step:7481/10000 train_time:315655ms step_avg:42.19ms
+[2025-09-05 16:03:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:03:31] [Rank 0] PRINT: step:7500/10000 train_loss:1.4136 val_loss:1.4039 train_time:316618ms step_avg:42.22ms
+[2025-09-05 16:03:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:03:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:04:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:04:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:04:52] [Rank 0] Total Loss: 4.1262
+[2025-09-05 16:04:52] [Rank 0] Total FTA (Unweighted): 0.5713
+[2025-09-05 16:04:52] [Rank 0] Total FTA (Weighted): 0.5713
+[2025-09-05 16:04:52] [Rank 0] Group 0 Loss: 3.3686
+[2025-09-05 16:04:52] [Rank 0] Group 1 Loss: 3.2420
+[2025-09-05 16:04:52] [Rank 0] Group 2 Loss: 3.1454
+[2025-09-05 16:04:52] [Rank 0] Group 3 Loss: 3.5314
+[2025-09-05 16:04:52] [Rank 0] Group 4 Loss: 3.6672
+[2025-09-05 16:04:52] [Rank 0] Group 5 Loss: 3.8485
+[2025-09-05 16:04:52] [Rank 0] Group 6 Loss: 3.9001
+[2025-09-05 16:04:52] [Rank 0] Group 7 Loss: 4.1482
+[2025-09-05 16:04:52] [Rank 0] Group 8 Loss: 4.3955
+[2025-09-05 16:04:52] [Rank 0] Group 9 Loss: 4.5161
+[2025-09-05 16:04:52] [Rank 0] Group 10 Loss: 4.6769
+[2025-09-05 16:04:52] [Rank 0] Group 11 Loss: 4.6790
+[2025-09-05 16:04:52] [Rank 0] Group 12 Loss: 4.6415
+[2025-09-05 16:04:52] [Rank 0] Group 13 Loss: 4.7585
+[2025-09-05 16:04:52] [Rank 0] Group 14 Loss: 4.7351
+[2025-09-05 16:04:52] [Rank 0] Group 15 Loss: 4.7644
+[2025-09-05 16:04:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:04:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:04:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:04:52] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:04:52] [Rank 0] Group 4 FTA: 0.9700
+[2025-09-05 16:04:52] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 16:04:52] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:04:52] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 16:04:52] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 16:04:52] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 16:04:52] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 16:04:52] [Rank 0] Group 11 FTA: 0.4000
+[2025-09-05 16:04:52] [Rank 0] Group 12 FTA: 0.3100
+[2025-09-05 16:04:52] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 16:04:52] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:04:52] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 16:04:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 16:04:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 16:04:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 16:04:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 16:04:54] [Rank 0] step:7501/10000 train_time:316627ms step_avg:42.21ms
+[2025-09-05 16:04:54] [Rank 0] step:7521/10000 train_time:317295ms step_avg:42.19ms
+[2025-09-05 16:04:55] [Rank 0] step:7541/10000 train_time:318034ms step_avg:42.17ms
+[2025-09-05 16:04:56] [Rank 0] step:7561/10000 train_time:318774ms step_avg:42.16ms
+[2025-09-05 16:04:57] [Rank 0] step:7581/10000 train_time:319514ms step_avg:42.15ms
+[2025-09-05 16:04:57] [Rank 0] step:7601/10000 train_time:320254ms step_avg:42.13ms
+[2025-09-05 16:04:58] [Rank 0] step:7621/10000 train_time:320993ms step_avg:42.12ms
+[2025-09-05 16:04:59] [Rank 0] step:7641/10000 train_time:321733ms step_avg:42.11ms
+[2025-09-05 16:05:00] [Rank 0] step:7661/10000 train_time:322671ms step_avg:42.12ms
+[2025-09-05 16:05:01] [Rank 0] step:7681/10000 train_time:323411ms step_avg:42.11ms
+[2025-09-05 16:05:01] [Rank 0] step:7701/10000 train_time:324151ms step_avg:42.09ms
+[2025-09-05 16:05:02] [Rank 0] step:7721/10000 train_time:324892ms step_avg:42.08ms
+[2025-09-05 16:05:03] [Rank 0] step:7741/10000 train_time:325632ms step_avg:42.07ms
+[2025-09-05 16:05:04] [Rank 0] step:7761/10000 train_time:326372ms step_avg:42.05ms
+[2025-09-05 16:05:04] [Rank 0] step:7781/10000 train_time:327112ms step_avg:42.04ms
+[2025-09-05 16:05:05] [Rank 0] step:7801/10000 train_time:327852ms step_avg:42.03ms
+[2025-09-05 16:05:06] [Rank 0] step:7821/10000 train_time:328592ms step_avg:42.01ms
+[2025-09-05 16:05:06] [Rank 0] step:7841/10000 train_time:329332ms step_avg:42.00ms
+[2025-09-05 16:05:07] [Rank 0] step:7861/10000 train_time:330073ms step_avg:41.99ms
+[2025-09-05 16:05:08] [Rank 0] step:7881/10000 train_time:330813ms step_avg:41.98ms
+[2025-09-05 16:05:09] [Rank 0] step:7901/10000 train_time:331553ms step_avg:41.96ms
+[2025-09-05 16:05:09] [Rank 0] step:7921/10000 train_time:332293ms step_avg:41.95ms
+[2025-09-05 16:05:10] [Rank 0] step:7941/10000 train_time:333032ms step_avg:41.94ms
+[2025-09-05 16:05:11] [Rank 0] step:7961/10000 train_time:333772ms step_avg:41.93ms
+[2025-09-05 16:05:12] [Rank 0] step:7981/10000 train_time:334512ms step_avg:41.91ms
+[2025-09-05 16:05:12] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:05:13] [Rank 0] PRINT: step:8000/10000 train_loss:1.4121 val_loss:1.4018 train_time:335332ms step_avg:41.92ms
+[2025-09-05 16:05:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:05:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:06:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:06:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:06:34] [Rank 0] Total Loss: 4.1512
+[2025-09-05 16:06:34] [Rank 0] Total FTA (Unweighted): 0.5713
+[2025-09-05 16:06:34] [Rank 0] Total FTA (Weighted): 0.5713
+[2025-09-05 16:06:34] [Rank 0] Group 0 Loss: 3.3961
+[2025-09-05 16:06:34] [Rank 0] Group 1 Loss: 3.2334
+[2025-09-05 16:06:34] [Rank 0] Group 2 Loss: 3.2212
+[2025-09-05 16:06:34] [Rank 0] Group 3 Loss: 3.5137
+[2025-09-05 16:06:34] [Rank 0] Group 4 Loss: 3.7000
+[2025-09-05 16:06:34] [Rank 0] Group 5 Loss: 3.8990
+[2025-09-05 16:06:34] [Rank 0] Group 6 Loss: 3.9194
+[2025-09-05 16:06:34] [Rank 0] Group 7 Loss: 4.1781
+[2025-09-05 16:06:34] [Rank 0] Group 8 Loss: 4.4113
+[2025-09-05 16:06:34] [Rank 0] Group 9 Loss: 4.5560
+[2025-09-05 16:06:34] [Rank 0] Group 10 Loss: 4.7028
+[2025-09-05 16:06:34] [Rank 0] Group 11 Loss: 4.6959
+[2025-09-05 16:06:34] [Rank 0] Group 12 Loss: 4.6830
+[2025-09-05 16:06:34] [Rank 0] Group 13 Loss: 4.7792
+[2025-09-05 16:06:34] [Rank 0] Group 14 Loss: 4.7511
+[2025-09-05 16:06:34] [Rank 0] Group 15 Loss: 4.7786
+[2025-09-05 16:06:34] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:06:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:06:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:06:34] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:06:34] [Rank 0] Group 4 FTA: 0.9700
+[2025-09-05 16:06:34] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 16:06:34] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:06:34] [Rank 0] Group 7 FTA: 0.4900
+[2025-09-05 16:06:34] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 16:06:34] [Rank 0] Group 9 FTA: 0.4000
+[2025-09-05 16:06:34] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 16:06:34] [Rank 0] Group 11 FTA: 0.3800
+[2025-09-05 16:06:34] [Rank 0] Group 12 FTA: 0.3400
+[2025-09-05 16:06:34] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 16:06:34] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:06:34] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 16:06:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 16:06:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 16:06:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 16:06:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 16:06:35] [Rank 0] step:8001/10000 train_time:335341ms step_avg:41.91ms
+[2025-09-05 16:06:37] [Rank 0] step:8021/10000 train_time:336605ms step_avg:41.97ms
+[2025-09-05 16:06:37] [Rank 0] step:8041/10000 train_time:337344ms step_avg:41.95ms
+[2025-09-05 16:06:38] [Rank 0] step:8061/10000 train_time:338085ms step_avg:41.94ms
+[2025-09-05 16:06:39] [Rank 0] step:8081/10000 train_time:338825ms step_avg:41.93ms
+[2025-09-05 16:06:40] [Rank 0] step:8101/10000 train_time:339702ms step_avg:41.93ms
+[2025-09-05 16:06:41] [Rank 0] step:8121/10000 train_time:340442ms step_avg:41.92ms
+[2025-09-05 16:06:41] [Rank 0] step:8141/10000 train_time:341181ms step_avg:41.91ms
+[2025-09-05 16:06:42] [Rank 0] step:8161/10000 train_time:342139ms step_avg:41.92ms
+[2025-09-05 16:06:43] [Rank 0] step:8181/10000 train_time:342879ms step_avg:41.91ms
+[2025-09-05 16:06:44] [Rank 0] step:8201/10000 train_time:343620ms step_avg:41.90ms
+[2025-09-05 16:06:44] [Rank 0] step:8221/10000 train_time:344360ms step_avg:41.89ms
+[2025-09-05 16:06:45] [Rank 0] step:8241/10000 train_time:345101ms step_avg:41.88ms
+[2025-09-05 16:06:46] [Rank 0] step:8261/10000 train_time:345841ms step_avg:41.86ms
+[2025-09-05 16:06:47] [Rank 0] step:8281/10000 train_time:346582ms step_avg:41.85ms
+[2025-09-05 16:06:47] [Rank 0] step:8301/10000 train_time:347322ms step_avg:41.84ms
+[2025-09-05 16:06:48] [Rank 0] step:8321/10000 train_time:348062ms step_avg:41.83ms
+[2025-09-05 16:06:49] [Rank 0] step:8341/10000 train_time:348803ms step_avg:41.82ms
+[2025-09-05 16:06:50] [Rank 0] step:8361/10000 train_time:349543ms step_avg:41.81ms
+[2025-09-05 16:06:50] [Rank 0] step:8381/10000 train_time:350287ms step_avg:41.80ms
+[2025-09-05 16:06:51] [Rank 0] step:8401/10000 train_time:351031ms step_avg:41.78ms
+[2025-09-05 16:06:52] [Rank 0] step:8421/10000 train_time:351771ms step_avg:41.77ms
+[2025-09-05 16:06:53] [Rank 0] step:8441/10000 train_time:352511ms step_avg:41.76ms
+[2025-09-05 16:06:53] [Rank 0] step:8461/10000 train_time:353251ms step_avg:41.75ms
+[2025-09-05 16:06:54] [Rank 0] step:8481/10000 train_time:353990ms step_avg:41.74ms
+[2025-09-05 16:06:55] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
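Editor's note: for downstream analysis it is convenient to pull the (step, train_time, step_avg) triples out of lines like those above. A small, self-contained parser (the line format is taken from this log; the function name is ours):

```python
# Parse "step:NNNN/10000 train_time:XXXms step_avg:YYms" triples out of
# log lines like the ones above. The regex mirrors this file's format.
import re

STEP_RE = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")

def parse_step_lines(lines):
    for line in lines:
        m = STEP_RE.search(line)
        if m:
            yield int(m.group(1)), int(m.group(2)), float(m.group(3))

sample = "[2025-09-05 16:06:54] [Rank 0] step:8481/10000 train_time:353990ms step_avg:41.74ms"
assert list(parse_step_lines([sample])) == [(8481, 353990, 41.74)]
```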
+[2025-09-05 16:06:55] [Rank 0] PRINT: step:8500/10000 train_loss:1.4095 val_loss:1.4047 train_time:354812ms step_avg:41.74ms
+[2025-09-05 16:06:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:06:56] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:08:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:08:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:08:17] [Rank 0] Total Loss: 4.0893
+[2025-09-05 16:08:17] [Rank 0] Total FTA (Unweighted): 0.5763
+[2025-09-05 16:08:17] [Rank 0] Total FTA (Weighted): 0.5763
+[2025-09-05 16:08:17] [Rank 0] Group 0 Loss: 3.4368
+[2025-09-05 16:08:17] [Rank 0] Group 1 Loss: 3.1524
+[2025-09-05 16:08:17] [Rank 0] Group 2 Loss: 3.1560
+[2025-09-05 16:08:17] [Rank 0] Group 3 Loss: 3.4603
+[2025-09-05 16:08:17] [Rank 0] Group 4 Loss: 3.6529
+[2025-09-05 16:08:17] [Rank 0] Group 5 Loss: 3.8264
+[2025-09-05 16:08:17] [Rank 0] Group 6 Loss: 3.8441
+[2025-09-05 16:08:17] [Rank 0] Group 7 Loss: 4.1040
+[2025-09-05 16:08:17] [Rank 0] Group 8 Loss: 4.3569
+[2025-09-05 16:08:17] [Rank 0] Group 9 Loss: 4.4865
+[2025-09-05 16:08:17] [Rank 0] Group 10 Loss: 4.6337
+[2025-09-05 16:08:17] [Rank 0] Group 11 Loss: 4.6388
+[2025-09-05 16:08:17] [Rank 0] Group 12 Loss: 4.6056
+[2025-09-05 16:08:17] [Rank 0] Group 13 Loss: 4.6897
+[2025-09-05 16:08:17] [Rank 0] Group 14 Loss: 4.6896
+[2025-09-05 16:08:17] [Rank 0] Group 15 Loss: 4.6959
+[2025-09-05 16:08:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:08:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:08:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:08:17] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:08:17] [Rank 0] Group 4 FTA: 0.9700
+[2025-09-05 16:08:17] [Rank 0] Group 5 FTA: 0.6000
+[2025-09-05 16:08:17] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:08:17] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 16:08:17] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 16:08:17] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 16:08:17] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 16:08:17] [Rank 0] Group 11 FTA: 0.4100
+[2025-09-05 16:08:17] [Rank 0] Group 12 FTA: 0.3600
+[2025-09-05 16:08:17] [Rank 0] Group 13 FTA: 0.2300
+[2025-09-05 16:08:17] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 16:08:17] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 16:08:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 16:08:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 16:08:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 16:08:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 16:08:18] [Rank 0] step:8501/10000 train_time:354821ms step_avg:41.74ms
+[2025-09-05 16:08:19] [Rank 0] step:8521/10000 train_time:355502ms step_avg:41.72ms
+[2025-09-05 16:08:20] [Rank 0] step:8541/10000 train_time:356242ms step_avg:41.71ms
+[2025-09-05 16:08:20] [Rank 0] step:8561/10000 train_time:356982ms step_avg:41.70ms
+[2025-09-05 16:08:21] [Rank 0] step:8581/10000 train_time:357724ms step_avg:41.69ms
+[2025-09-05 16:08:22] [Rank 0] step:8601/10000 train_time:358464ms step_avg:41.68ms
+[2025-09-05 16:08:23] [Rank 0] step:8621/10000 train_time:359204ms step_avg:41.67ms
+[2025-09-05 16:08:23] [Rank 0] step:8641/10000 train_time:359944ms step_avg:41.66ms
+[2025-09-05 16:08:24] [Rank 0] step:8661/10000 train_time:360684ms step_avg:41.64ms
+[2025-09-05 16:08:25] [Rank 0] step:8681/10000 train_time:361424ms step_avg:41.63ms
+[2025-09-05 16:08:26] [Rank 0] step:8701/10000 train_time:362164ms step_avg:41.62ms
+[2025-09-05 16:08:26] [Rank 0] step:8721/10000 train_time:362904ms step_avg:41.61ms
+[2025-09-05 16:08:27] [Rank 0] step:8741/10000 train_time:363643ms step_avg:41.60ms
+[2025-09-05 16:08:28] [Rank 0] step:8761/10000 train_time:364384ms step_avg:41.59ms
+[2025-09-05 16:08:29] [Rank 0] step:8781/10000 train_time:365124ms step_avg:41.58ms
+[2025-09-05 16:08:29] [Rank 0] step:8801/10000 train_time:365864ms step_avg:41.57ms
+[2025-09-05 16:08:30] [Rank 0] step:8821/10000 train_time:366604ms step_avg:41.56ms
+[2025-09-05 16:08:31] [Rank 0] step:8841/10000 train_time:367954ms step_avg:41.62ms
+[2025-09-05 16:08:32] [Rank 0] step:8861/10000 train_time:368695ms step_avg:41.61ms
+[2025-09-05 16:08:33] [Rank 0] step:8881/10000 train_time:369435ms step_avg:41.60ms
+[2025-09-05 16:08:34] [Rank 0] step:8901/10000 train_time:370175ms step_avg:41.59ms
+[2025-09-05 16:08:34] [Rank 0] step:8921/10000 train_time:370914ms step_avg:41.58ms
+[2025-09-05 16:08:35] [Rank 0] step:8941/10000 train_time:371655ms step_avg:41.57ms
+[2025-09-05 16:08:36] [Rank 0] step:8961/10000 train_time:372395ms step_avg:41.56ms
+[2025-09-05 16:08:37] [Rank 0] step:8981/10000 train_time:373135ms step_avg:41.55ms
+[2025-09-05 16:08:37] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:08:38] [Rank 0] PRINT: step:9000/10000 train_loss:1.4042 val_loss:1.3937 train_time:373956ms step_avg:41.55ms
+[2025-09-05 16:08:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:08:38] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:09:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:09:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:09:59] [Rank 0] Total Loss: 4.1779
+[2025-09-05 16:09:59] [Rank 0] Total FTA (Unweighted): 0.5894
+[2025-09-05 16:09:59] [Rank 0] Total FTA (Weighted): 0.5894
+[2025-09-05 16:09:59] [Rank 0] Group 0 Loss: 3.4907
+[2025-09-05 16:09:59] [Rank 0] Group 1 Loss: 3.2938
+[2025-09-05 16:09:59] [Rank 0] Group 2 Loss: 3.2281
+[2025-09-05 16:09:59] [Rank 0] Group 3 Loss: 3.5308
+[2025-09-05 16:09:59] [Rank 0] Group 4 Loss: 3.7651
+[2025-09-05 16:09:59] [Rank 0] Group 5 Loss: 3.8853
+[2025-09-05 16:09:59] [Rank 0] Group 6 Loss: 3.9420
+[2025-09-05 16:09:59] [Rank 0] Group 7 Loss: 4.1956
+[2025-09-05 16:09:59] [Rank 0] Group 8 Loss: 4.4354
+[2025-09-05 16:09:59] [Rank 0] Group 9 Loss: 4.5810
+[2025-09-05 16:09:59] [Rank 0] Group 10 Loss: 4.7378
+[2025-09-05 16:09:59] [Rank 0] Group 11 Loss: 4.7538
+[2025-09-05 16:09:59] [Rank 0] Group 12 Loss: 4.7106
+[2025-09-05 16:09:59] [Rank 0] Group 13 Loss: 4.7738
+[2025-09-05 16:09:59] [Rank 0] Group 14 Loss: 4.7629
+[2025-09-05 16:09:59] [Rank 0] Group 15 Loss: 4.7597
+[2025-09-05 16:09:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:09:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:09:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:09:59] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:09:59] [Rank 0] Group 4 FTA: 0.9700
+[2025-09-05 16:09:59] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 16:09:59] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:09:59] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 16:09:59] [Rank 0] Group 8 FTA: 0.5200
+[2025-09-05 16:09:59] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 16:09:59] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 16:09:59] [Rank 0] Group 11 FTA: 0.4300
+[2025-09-05 16:09:59] [Rank 0] Group 12 FTA: 0.4200
+[2025-09-05 16:09:59] [Rank 0] Group 13 FTA: 0.2800
+[2025-09-05 16:09:59] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-05 16:09:59] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 16:09:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 16:10:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 16:10:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 16:10:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 16:10:00] [Rank 0] step:9001/10000 train_time:373966ms step_avg:41.55ms
+[2025-09-05 16:10:01] [Rank 0] step:9021/10000 train_time:374642ms step_avg:41.53ms
+[2025-09-05 16:10:02] [Rank 0] step:9041/10000 train_time:375383ms step_avg:41.52ms
+[2025-09-05 16:10:02] [Rank 0] step:9061/10000 train_time:376123ms step_avg:41.51ms
+[2025-09-05 16:10:03] [Rank 0] step:9081/10000 train_time:376864ms step_avg:41.50ms
+[2025-09-05 16:10:04] [Rank 0] step:9101/10000 train_time:377604ms step_avg:41.49ms
+[2025-09-05 16:10:05] [Rank 0] step:9121/10000 train_time:378349ms step_avg:41.48ms
+[2025-09-05 16:10:05] [Rank 0] step:9141/10000 train_time:379089ms step_avg:41.47ms
+[2025-09-05 16:10:06] [Rank 0] step:9161/10000 train_time:379830ms step_avg:41.46ms
+[2025-09-05 16:10:07] [Rank 0] step:9181/10000 train_time:380570ms step_avg:41.45ms
+[2025-09-05 16:10:08] [Rank 0] step:9201/10000 train_time:381311ms step_avg:41.44ms
+[2025-09-05 16:10:08] [Rank 0] step:9221/10000 train_time:382052ms step_avg:41.43ms
+[2025-09-05 16:10:09] [Rank 0] step:9241/10000 train_time:382792ms step_avg:41.42ms
+[2025-09-05 16:10:10] [Rank 0] step:9261/10000 train_time:383532ms step_avg:41.41ms
+[2025-09-05 16:10:11] [Rank 0] step:9281/10000 train_time:384272ms step_avg:41.40ms
+[2025-09-05 16:10:11] [Rank 0] step:9301/10000 train_time:385012ms step_avg:41.39ms
+[2025-09-05 16:10:12] [Rank 0] step:9321/10000 train_time:385752ms step_avg:41.39ms
+[2025-09-05 16:10:13] [Rank 0] step:9341/10000 train_time:386492ms step_avg:41.38ms
+[2025-09-05 16:10:13] [Rank 0] step:9361/10000 train_time:387231ms step_avg:41.37ms
+[2025-09-05 16:10:14] [Rank 0] step:9381/10000 train_time:387970ms step_avg:41.36ms
+[2025-09-05 16:10:15] [Rank 0] step:9401/10000 train_time:388709ms step_avg:41.35ms
+[2025-09-05 16:10:16] [Rank 0] step:9421/10000 train_time:389450ms step_avg:41.34ms
+[2025-09-05 16:10:16] [Rank 0] step:9441/10000 train_time:390191ms step_avg:41.33ms
+[2025-09-05 16:10:17] [Rank 0] step:9461/10000 train_time:390931ms step_avg:41.32ms
+[2025-09-05 16:10:18] [Rank 0] step:9481/10000 train_time:391672ms step_avg:41.31ms
+[2025-09-05 16:10:19] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:10:19] [Rank 0] PRINT: step:9500/10000 train_loss:1.3984 val_loss:1.3872 train_time:392493ms step_avg:41.32ms
+[2025-09-05 16:10:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:10:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:11:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:11:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:11:41] [Rank 0] Total Loss: 4.1316
+[2025-09-05 16:11:41] [Rank 0] Total FTA (Unweighted): 0.5875
+[2025-09-05 16:11:41] [Rank 0] Total FTA (Weighted): 0.5875
+[2025-09-05 16:11:41] [Rank 0] Group 0 Loss: 3.4690
+[2025-09-05 16:11:41] [Rank 0] Group 1 Loss: 3.2617
+[2025-09-05 16:11:41] [Rank 0] Group 2 Loss: 3.2482
+[2025-09-05 16:11:41] [Rank 0] Group 3 Loss: 3.5466
+[2025-09-05 16:11:41] [Rank 0] Group 4 Loss: 3.6901
+[2025-09-05 16:11:41] [Rank 0] Group 5 Loss: 3.8340
+[2025-09-05 16:11:41] [Rank 0] Group 6 Loss: 3.8707
+[2025-09-05 16:11:41] [Rank 0] Group 7 Loss: 4.1291
+[2025-09-05 16:11:41] [Rank 0] Group 8 Loss: 4.4024
+[2025-09-05 16:11:41] [Rank 0] Group 9 Loss: 4.5316
+[2025-09-05 16:11:41] [Rank 0] Group 10 Loss: 4.6878
+[2025-09-05 16:11:41] [Rank 0] Group 11 Loss: 4.6844
+[2025-09-05 16:11:41] [Rank 0] Group 12 Loss: 4.6437
+[2025-09-05 16:11:41] [Rank 0] Group 13 Loss: 4.7020
+[2025-09-05 16:11:41] [Rank 0] Group 14 Loss: 4.6943
+[2025-09-05 16:11:41] [Rank 0] Group 15 Loss: 4.7096
+[2025-09-05 16:11:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:11:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:11:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:11:41] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:11:41] [Rank 0] Group 4 FTA: 0.9700
+[2025-09-05 16:11:41] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 16:11:41] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:11:41] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 16:11:41] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 16:11:41] [Rank 0] Group 9 FTA: 0.4000
+[2025-09-05 16:11:41] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 16:11:41] [Rank 0] Group 11 FTA: 0.4000
+[2025-09-05 16:11:41] [Rank 0] Group 12 FTA: 0.4500
+[2025-09-05 16:11:41] [Rank 0] Group 13 FTA: 0.2700
+[2025-09-05 16:11:41] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 16:11:41] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 16:11:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 16:11:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 16:11:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 16:11:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 16:11:42] [Rank 0] step:9501/10000 train_time:392502ms step_avg:41.31ms
+[2025-09-05 16:11:43] [Rank 0] step:9521/10000 train_time:393170ms step_avg:41.30ms
+[2025-09-05 16:11:44] [Rank 0] step:9541/10000 train_time:393910ms step_avg:41.29ms
+[2025-09-05 16:11:44] [Rank 0] step:9561/10000 train_time:394649ms step_avg:41.28ms
+[2025-09-05 16:11:45] [Rank 0] step:9581/10000 train_time:395389ms step_avg:41.27ms
+[2025-09-05 16:11:46] [Rank 0] step:9601/10000 train_time:396128ms step_avg:41.26ms
+[2025-09-05 16:11:47] [Rank 0] step:9621/10000 train_time:396867ms step_avg:41.25ms
+[2025-09-05 16:11:47] [Rank 0] step:9641/10000 train_time:397606ms step_avg:41.24ms
+[2025-09-05 16:11:48] [Rank 0] step:9661/10000 train_time:398622ms step_avg:41.26ms
+[2025-09-05 16:11:49] [Rank 0] step:9681/10000 train_time:399360ms step_avg:41.25ms
+[2025-09-05 16:11:50] [Rank 0] step:9701/10000 train_time:400100ms step_avg:41.24ms
+[2025-09-05 16:11:51] [Rank 0] step:9721/10000 train_time:400840ms step_avg:41.23ms
+[2025-09-05 16:11:51] [Rank 0] step:9741/10000 train_time:401579ms step_avg:41.23ms
+[2025-09-05 16:11:52] [Rank 0] step:9761/10000 train_time:402318ms step_avg:41.22ms
+[2025-09-05 16:11:53] [Rank 0] step:9781/10000 train_time:403058ms step_avg:41.21ms
+[2025-09-05 16:11:53] [Rank 0] step:9801/10000 train_time:403798ms step_avg:41.20ms
+[2025-09-05 16:11:54] [Rank 0] step:9821/10000 train_time:404538ms step_avg:41.19ms
+[2025-09-05 16:11:55] [Rank 0] step:9841/10000 train_time:405282ms step_avg:41.18ms
+[2025-09-05 16:11:56] [Rank 0] step:9861/10000 train_time:406157ms step_avg:41.19ms
+[2025-09-05 16:11:57] [Rank 0] step:9881/10000 train_time:406897ms step_avg:41.18ms
+[2025-09-05 16:11:57] [Rank 0] step:9901/10000 train_time:407638ms step_avg:41.17ms
+[2025-09-05 16:11:58] [Rank 0] step:9921/10000 train_time:408566ms step_avg:41.18ms
+[2025-09-05 16:11:59] [Rank 0] step:9941/10000 train_time:409306ms step_avg:41.17ms
+[2025-09-05 16:12:00] [Rank 0] step:9961/10000 train_time:410045ms step_avg:41.17ms
+[2025-09-05 16:12:00] [Rank 0] step:9981/10000 train_time:410785ms step_avg:41.16ms
+[2025-09-05 16:12:01] [Rank 0] step:10000/10000 train_time:411488ms step_avg:41.15ms
+[2025-09-05 16:12:01] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:12:02] [Rank 0] PRINT: step:10000/10000 train_loss:1.3933 val_loss:1.3819 train_time:411613ms step_avg:41.16ms
+[2025-09-05 16:12:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:12:02] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:13:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:13:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:13:22] [Rank 0] Total Loss: 4.1631
+[2025-09-05 16:13:22] [Rank 0] Total FTA (Unweighted): 0.5894
+[2025-09-05 16:13:22] [Rank 0] Total FTA (Weighted): 0.5894
+[2025-09-05 16:13:22] [Rank 0] Group 0 Loss: 3.4923
+[2025-09-05 16:13:22] [Rank 0] Group 1 Loss: 3.3233
+[2025-09-05 16:13:22] [Rank 0] Group 2 Loss: 3.2761
+[2025-09-05 16:13:22] [Rank 0] Group 3 Loss: 3.5901
+[2025-09-05 16:13:22] [Rank 0] Group 4 Loss: 3.7420
+[2025-09-05 16:13:22] [Rank 0] Group 5 Loss: 3.8710
+[2025-09-05 16:13:22] [Rank 0] Group 6 Loss: 3.9025
+[2025-09-05 16:13:22] [Rank 0] Group 7 Loss: 4.1458
+[2025-09-05 16:13:22] [Rank 0] Group 8 Loss: 4.4341
+[2025-09-05 16:13:22] [Rank 0] Group 9 Loss: 4.5575
+[2025-09-05 16:13:22] [Rank 0] Group 10 Loss: 4.7052
+[2025-09-05 16:13:22] [Rank 0] Group 11 Loss: 4.7081
+[2025-09-05 16:13:22] [Rank 0] Group 12 Loss: 4.6770
+[2025-09-05 16:13:22] [Rank 0] Group 13 Loss: 4.7336
+[2025-09-05 16:13:22] [Rank 0] Group 14 Loss: 4.7092
+[2025-09-05 16:13:22] [Rank 0] Group 15 Loss: 4.7423
+[2025-09-05 16:13:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:13:22] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:13:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:13:22] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:13:22] [Rank 0] Group 4 FTA: 0.9700
+[2025-09-05 16:13:22] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 16:13:22] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:13:22] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 16:13:22] [Rank 0] Group 8 FTA: 0.5200
+[2025-09-05 16:13:23] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 16:13:23] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 16:13:23] [Rank 0] Group 11 FTA: 0.4200
+[2025-09-05 16:13:23] [Rank 0] Group 12 FTA: 0.4400
+[2025-09-05 16:13:23] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-05 16:13:23] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 16:13:23] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:13:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_loss_curves.png
+[2025-09-05 16:13:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/per_class_acc_curves.png
+[2025-09-05 16:13:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_loss_curve.png
+[2025-09-05 16:13:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_44/total_acc_curve.png
+[2025-09-05 16:13:24] [Rank 0] step:10001/10000 train_time:411622ms step_avg:41.16ms
+[2025-09-05 16:13:24] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 16:13:24 2025 ---
+[2025-09-05 16:13:24] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..abd7c9a2bbf9f0bfe11f9a0d52b241cbd85fd7f1
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/config.json
@@ -0,0 +1,29 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 
45, + "optimizer_mode": 9, + "model_parameterization": "gated", + "per_group_k": 100, + "muon_lr": 0.01, + "adam_lr": 0.001, + "base_dir": "logs_qa_sgd_gated/lr_search_long", + "sgd_lr": 0.5, + "m_val": 15, + "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl" + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin", + "val_tokens": 491520, + "train_seq_len": 3072, + "val_seq_len": 16384, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "3c46824e-b4ab-4d5d-90a4-f9d61a087e6c", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/fixed_eval_indices.json new file mode 100644 index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/fixed_eval_indices.json @@ -0,0 +1 @@ +{"1": [1238956, 182074, 1437575, 1061037, 383150, 1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 
934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 
1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 
414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 
913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..2ae2b56a13cf431a9301400a3b4c51c3a9780acc --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e52f6a5157d49b4863120e7b20a8e84307731cf3e855b11c93f78c3bd6beafd4 +size 417839 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..a80c4e3104f925012b9766c046bd99af498db24b --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6abe34da71876c44aa7d50ccbb945a2670a10a6fd7b51a9bbfe796edd42a46a2 +size 461385 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..a86bd86c995a424bc543c5ae43c881ce0682a671 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bcb4022a1c522e775eaba03e188528d7a03e85fdccde06c5c91821f4308593e +size 94644 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..4fe0d9bbebca04663853efc7898e1f248c6f45d3 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdc62f9f822229d271876f7eb9f90bae04581b2539b62bbf03e5a92c62067302 +size 117039 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/training_log_3c46824e-b4ab-4d5d-90a4-f9d61a087e6c.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/training_log_3c46824e-b4ab-4d5d-90a4-f9d61a087e6c.txt new file mode 100644 index 0000000000000000000000000000000000000000..a9282424451210e01d1846e6357d25e464aca836 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/training_log_3c46824e-b4ab-4d5d-90a4-f9d61a087e6c.txt @@ -0,0 +1,5614 @@ +[2025-09-05 16:13:47] [Rank 0] PRINT: --- Script Start: Fri Sep 5 16:13:47 2025 --- +[2025-09-05 16:13:47] [Rank 0] PRINT: --- Script Start: Fri Sep 5 16:13:47 2025 --- +[2025-09-05 16:13:47] [Rank 0] PRINT: Parsed CLI args: 
Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 16:13:47] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 16:13:47] [Rank 0] PRINT: Using fixed seed: 45
+[2025-09-05 16:13:47] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45
+[2025-09-05 16:13:47] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
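+# [Illustrative sketch, not part of the logged script] The shard layout that
+# _load_data_shard expects is: a header of 256 int32 words, where word 0 is the
+# magic number 20240520, word 1 the version (1), and word 2 the token count,
+# followed by the uint16 token stream at byte offset 256 * 4. A matching writer
+# could look like the following (write_data_shard and its arguments are
+# hypothetical names, not from this repo):
+#
+#     def write_data_shard(path, tokens_uint16):
+#         header = np.zeros(256, dtype=np.int32)
+#         header[0] = 20240520            # magic number checked by the reader
+#         header[1] = 1                   # version checked by the reader
+#         header[2] = len(tokens_uint16)  # token count; 2 bytes per token follow
+#         with open(path, "wb") as f:
+#             f.write(header.tobytes())   # 256 * 4 bytes, matching f.seek(256 * 4)
+#             f.write(np.asarray(tokens_uint16, dtype=np.uint16).tobytes())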
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so training can run past a single epoch
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
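+# [Illustrative sketch, not part of the logged script] The generator is an
+# infinite iterator; each rank reads its own local_batch_size slice of the
+# shared position, and targets are inputs shifted by one token. A hypothetical
+# single-process usage, assuming a CUDA device and the train shards above:
+#
+#     gen = distributed_data_generator(args.train_files, batch_size=3072,
+#                                      rank=0, world_size=1)
+#     inputs, targets = next(gen)        # int32 inputs, int64 targets, on cuda
+#     assert inputs.shape == targets.shape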
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam). "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP). "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn). "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole","qkvo","gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # NOTE: these attributes carry no type annotations, so @dataclass registers no
+    # fields and repr() prints just "Hyperparameters()", as seen in the log above.
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 491520
+    train_seq_len = 3*1024
+    val_seq_len = 4*4*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
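+# [Illustrative note, not part of the logged script] Without annotations,
+# @dataclass sees only plain class attributes, which is why the config dump has
+# to read args.__class__.__dict__ instead of dataclasses.asdict(args). A minimal
+# sketch of the behavior (class H is a hypothetical example):
+#
+#     from dataclasses import dataclass, fields
+#     @dataclass
+#     class H:
+#         a = 1          # class attribute: NOT a dataclass field
+#         b: int = 2     # annotated: becomes a field
+#     assert [f.name for f in fields(H)] == ["b"]
+#     assert repr(H()) == "H(b=2)"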
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file exactly once; an extra unconditional
+        # write here would duplicate every line of the training log.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
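+# [Illustrative note, not part of the logged script] Group g >= 1 holds 2**(g-1)
+# classes with 2**(m-g) samples each, so every group past the head contributes the
+# same total sample mass while individual classes get exponentially rarer. Worked
+# out for a small exponent, m = 3:
+#
+#     counts, groups = generate_powerlaw_selection_counts(3)
+#     # group 0: 1 class  x 8 samples -> class 0
+#     # group 1: 1 class  x 4 samples -> class 1
+#     # group 2: 2 classes x 2 samples -> classes 2-3
+#     # group 3: 4 classes x 1 sample  -> classes 4-7
+#     assert counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#     assert groups == [0, 1, 2, 2, 3, 3, 3, 3]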
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4.
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on QKV Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QKV Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2; Adam on QKV Attn, W_1
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # note: weight_decay=0.01 could be added to Adam here
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
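+
+# A possible consolidation (sketch only, kept entirely in comments so the
+# if/elif chain over model_parameterization stays intact): the mode dispatch
+# above, and its near-verbatim duplicate in the "gated" branch below, could be
+# driven by one table mapping mode -> (muon groups, adam groups). The name
+# MODE_TABLE is hypothetical; the group lists reuse the locals defined above,
+# and mode 9 (pure SGD) would stay special-cased.
+#
+#   MODE_TABLE = {
+#       0:  (all_attn_matrices + all_mlp_matrices, []),
+#       1:  (attn_qk_group, attn_vo_group + all_mlp_matrices),
+#       2:  (attn_vo_group, attn_qk_group + all_mlp_matrices),
+#       3:  (all_attn_matrices, all_mlp_matrices),
+#       4:  (all_mlp_matrices, all_attn_matrices),
+#       5:  ([], all_attn_matrices + all_mlp_matrices),
+#       6:  (mlp_w2_group, all_attn_matrices + mlp_w1_group),
+#       7:  (attn_vo_group + all_mlp_matrices, attn_qk_group),
+#       8:  (attn_vo_group + mlp_w2_group, attn_qk_group + mlp_w1_group),
+#       10: (attn_o_params + all_mlp_matrices, attn_v_params + attn_qk_group),
+#       13: (attn_o_params + mlp_w2_group, attn_qk_group + attn_v_params + mlp_w1_group),
+#       14: (attn_o_params, attn_qk_group + attn_v_params + all_mlp_matrices),
+#       15: (attn_v_params, attn_qk_group + attn_o_params + all_mlp_matrices),
+#       16: (attn_v_params + attn_qk_group, attn_o_params + all_mlp_matrices),
+#   }
+#   muon_params_target_list, adam_matrix_target_list = MODE_TABLE[current_optimizer_mode]
+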
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr # needed in this branch too: the Muon constructor below references it
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on QKV Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QKV Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2; Adam on QKV Attn, W_1
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # note: weight_decay=0.01 could be added to Adam here
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() # computed but not currently printed
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
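+
+# A small worked sketch of the stable-then-decay schedule implemented by get_lr
+# above, using this run's settings (num_iterations=10000, cooldown_frac=0.8);
+# the helper name is hypothetical and mirrors the logic for illustration only.
+def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped training progress, as in get_lr
+    if x < 1 - cooldown_frac:
+        return 1.0                                  # stable phase: full LR
+    w = (1 - x) / max(cooldown_frac, 1e-9)
+    return w * 1.0 + (1 - w) * 0.1                  # linear decay toward 10% of the base LR
+# e.g. step 1000 -> 1.0, step 5000 -> 0.6625, step 9000 -> 0.2125, step 10000 -> 0.1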
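+
+# Similarly, the Muon momentum warm-up applied in the training loop above ramps
+# momentum linearly from 0.85 to 0.95 over the first 300 steps, then holds it
+# (hypothetical helper name, same arithmetic as the loop body):
+def _muon_momentum_sketch(step: int) -> float:
+    frac = min(step / 300, 1)
+    return (1 - frac) * 0.85 + frac * 0.95
+# e.g. step 0 -> 0.85, step 150 -> 0.90, step 300 and beyond -> 0.95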
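+
+# And a concrete sketch of the power-law class layout produced by
+# generate_powerlaw_selection_counts(m) and used for the group mapping and the
+# fixed eval set: group 0 holds a single class selected 2**m times; each group
+# g >= 1 holds 2**(g-1) classes selected 2**(m-g) times apiece, so every group
+# g >= 1 contributes 2**(m-1) samples in total while individual tail classes
+# become exponentially rarer. For m = 3:
+#   group 0: 1 class   x 8 samples  (8 total)
+#   group 1: 1 class   x 4 samples  (4 total)
+#   group 2: 2 classes x 2 samples  (4 total)
+#   group 3: 4 classes x 1 sample   (4 total)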
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    # Per-class histories are dicts of dicts; totals are flat dicts
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    # Optional stratified subsampling to keep evaluation cheap
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty.
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:  # Muon on W_2, W_O
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:  # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:  # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:  # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # note: could add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "gated":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+    mlp_up_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+            mlp_up_params.append(block_module.mlp.c_up.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params + mlp_up_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params + mlp_up_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr  # needed below when the Muon optimizer is built
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9:  # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr  # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10:  # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:  # Muon on W_2, W_O
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14:  # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:  # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:  # Muon on QKV
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04)
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # note: could add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert; might fail on the last step when step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
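+
+# Worked example for the schedule above, assuming the settings used in these
+# runs (num_iterations=10000, cooldown_frac=0.8): the multiplier stays at 1.0
+# for steps 0-1999 (x < 0.2); at step 6000, x = 0.6 and
+# w = (1 - 0.6) / 0.8 = 0.5, giving 0.5 * 1.0 + 0.5 * 0.1 = 0.55; by step
+# 10000 it has decayed to 0.1. Each optimizer's effective LR is
+# group["initial_lr"] * get_lr(step), applied in the training loop below.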
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    # Add gradient clipping for SGD mode in warmup too
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+
+    # ===== [ADD] Fixed eval set (per-group equal sampling) =====
+    FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json"
+    #PER_GROUP_K = 100  # Number of samples per group
+
+    def _is_valid_qa_text_for_fta(text: str) -> bool:
+        # Quick filtering for building the fixed eval set: ensure a parseable "?" + "Answer:"
+        if not isinstance(text, str):
+            return False
+        return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None
+
+    def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025):
+        rng = random.Random(seed)
+        # Bucket line indices by group_id, but only collect samples that can be parsed for FTA
+        buckets = defaultdict(list)  # gid -> [line_idx, ...]
+        with open(jsonl_path, "r", encoding="utf-8") as f:
+            for i, line in enumerate(f):
+                try:
+                    item = json.loads(line)
+                except Exception:
+                    continue
+                gid = class_to_group_map.get(item.get("class_id"))
+                if gid is None:
+                    continue
+                if not _is_valid_qa_text_for_fta(item.get("text", "")):
+                    continue
+                buckets[gid].append(i)
+
+        fixed = {}
+        for gid, arr in buckets.items():
+            if len(arr) <= per_group_k:
+                fixed[str(gid)] = arr[:]  # Take all if fewer than K samples
+            else:
+                fixed[str(gid)] = rng.sample(arr, per_group_k)
+        return fixed
+
+    # You already have: QA_JSONL_PATH / M_FOR_POWERLAW
+    selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+    class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+    if not FIXED_VAL_INDEX_PATH.exists():
+        fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K)
+        with open(FIXED_VAL_INDEX_PATH, "w") as f:
+            json.dump(fixed_idx, f)
+        print0(f"PRINT: Built fixed eval set. Saved to {FIXED_VAL_INDEX_PATH}", console=True)
+    else:
+        print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True)
+        # --- FIX: Load the indices if the file already exists ---
+        with open(FIXED_VAL_INDEX_PATH, "r") as f:
+            fixed_idx = json.load(f)
+    # ===== [END ADD] =====
+
+    # ------------------------------------
+    #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl"
+    #M_FOR_POWERLAW = 15
+    #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
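+
+# How a fixed index set like the one above can be materialized from the JSONL
+# file (hedged sketch with a hypothetical helper name; in this script the
+# selection actually happens inside run_detailed_evaluation via its
+# fixed_indices argument):
+#   def load_fixed_eval_items(jsonl_path, fixed_idx):
+#       wanted = {i for idxs in fixed_idx.values() for i in idxs}
+#       with open(jsonl_path, "r", encoding="utf-8") as f:
+#           return {i: json.loads(line) for i, line in enumerate(f) if i in wanted}
+# With 16 groups and PER_GROUP_K = 100 this yields the "Fixed-eval set loaded
+# with 1600 samples" message seen in the log below.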
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = float(avg_train_loss.item())
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                fixed_indices=fixed_idx
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True)
+            print0(f"  Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc_unweighted']  # Use the simple average method
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    # Muon momentum warmup: ramps linearly from 0.85 at step 0 to 0.95 by step 300
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+[2025-09-05 16:13:47] [Rank 0] PRINT: Constructing model...
+[2025-09-05 16:13:49] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 16:13:49] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 16:13:49] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 16:13:53] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-09-05 16:13:53] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 16:13:53] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 16:13:53] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 16:13:53] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-09-05 16:13:53] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 16:13:53] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 16:13:53] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 16:13:53] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 16:13:53] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 16:13:58] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 16:13:58] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 16:14:44] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 16:14:44] [Rank 0] PRINT: Starting training...
+[2025-09-05 16:14:51] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/fixed_eval_indices.json
+[2025-09-05 16:14:51] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:14:54] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-05 16:14:54] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-09-05 16:15:27] [Rank 0] step:21/10000 train_time:32731ms step_avg:1558.64ms +[2025-09-05 16:15:27] [Rank 0] step:21/10000 train_time:32731ms step_avg:1558.64ms +[2025-09-05 16:15:28] [Rank 0] step:41/10000 train_time:33458ms step_avg:816.04ms +[2025-09-05 16:15:28] [Rank 0] step:41/10000 train_time:33458ms step_avg:816.04ms +[2025-09-05 16:15:28] [Rank 0] step:61/10000 train_time:34183ms step_avg:560.37ms +[2025-09-05 16:15:28] [Rank 0] step:61/10000 train_time:34183ms step_avg:560.37ms +[2025-09-05 16:15:29] [Rank 0] step:81/10000 train_time:34907ms step_avg:430.95ms +[2025-09-05 16:15:29] [Rank 0] step:81/10000 train_time:34907ms step_avg:430.95ms +[2025-09-05 16:15:30] [Rank 0] step:101/10000 train_time:35632ms step_avg:352.80ms +[2025-09-05 16:15:30] [Rank 0] step:101/10000 train_time:35632ms step_avg:352.80ms +[2025-09-05 16:15:31] [Rank 0] step:121/10000 train_time:36358ms step_avg:300.48ms +[2025-09-05 16:15:31] [Rank 0] step:121/10000 train_time:36358ms step_avg:300.48ms +[2025-09-05 16:15:31] [Rank 0] step:141/10000 train_time:37082ms step_avg:262.99ms +[2025-09-05 16:15:31] [Rank 0] step:141/10000 train_time:37082ms step_avg:262.99ms +[2025-09-05 16:15:32] [Rank 0] step:161/10000 train_time:37806ms step_avg:234.82ms +[2025-09-05 16:15:32] [Rank 0] step:161/10000 train_time:37806ms step_avg:234.82ms +[2025-09-05 16:15:33] [Rank 0] step:181/10000 train_time:38531ms step_avg:212.88ms +[2025-09-05 16:15:33] [Rank 0] step:181/10000 train_time:38531ms step_avg:212.88ms +[2025-09-05 16:15:33] [Rank 0] step:201/10000 train_time:39256ms step_avg:195.30ms +[2025-09-05 16:15:33] [Rank 0] step:201/10000 train_time:39256ms step_avg:195.30ms +[2025-09-05 16:15:34] [Rank 0] step:221/10000 train_time:39981ms step_avg:180.91ms +[2025-09-05 16:15:34] [Rank 0] step:221/10000 train_time:39981ms step_avg:180.91ms +[2025-09-05 16:15:35] [Rank 0] step:241/10000 train_time:40707ms step_avg:168.91ms +[2025-09-05 16:15:35] [Rank 0] step:241/10000 train_time:40707ms step_avg:168.91ms +[2025-09-05 16:15:36] [Rank 0] step:261/10000 train_time:41432ms step_avg:158.74ms +[2025-09-05 16:15:36] [Rank 0] step:261/10000 train_time:41432ms step_avg:158.74ms +[2025-09-05 16:15:36] [Rank 0] step:281/10000 train_time:42157ms step_avg:150.02ms +[2025-09-05 16:15:36] [Rank 0] step:281/10000 train_time:42157ms step_avg:150.02ms +[2025-09-05 16:15:37] [Rank 0] step:301/10000 train_time:42883ms step_avg:142.47ms +[2025-09-05 16:15:37] [Rank 0] step:301/10000 train_time:42883ms step_avg:142.47ms +[2025-09-05 16:15:38] [Rank 0] step:321/10000 train_time:43608ms step_avg:135.85ms +[2025-09-05 16:15:38] [Rank 0] step:321/10000 train_time:43608ms step_avg:135.85ms +[2025-09-05 16:15:38] [Rank 0] step:341/10000 train_time:44332ms step_avg:130.01ms +[2025-09-05 16:15:38] [Rank 0] step:341/10000 train_time:44332ms step_avg:130.01ms +[2025-09-05 16:15:39] [Rank 0] step:361/10000 train_time:45057ms step_avg:124.81ms +[2025-09-05 16:15:39] [Rank 0] step:361/10000 train_time:45057ms step_avg:124.81ms +[2025-09-05 16:15:40] [Rank 0] step:381/10000 train_time:45781ms step_avg:120.16ms +[2025-09-05 16:15:40] [Rank 0] step:381/10000 train_time:45781ms step_avg:120.16ms +[2025-09-05 16:15:41] [Rank 0] step:401/10000 train_time:46506ms step_avg:115.97ms +[2025-09-05 16:15:41] [Rank 0] step:401/10000 train_time:46506ms step_avg:115.97ms +[2025-09-05 16:15:41] [Rank 0] 
step:421/10000 train_time:47230ms step_avg:112.18ms +[2025-09-05 16:15:41] [Rank 0] step:421/10000 train_time:47230ms step_avg:112.18ms +[2025-09-05 16:15:42] [Rank 0] step:441/10000 train_time:47954ms step_avg:108.74ms +[2025-09-05 16:15:42] [Rank 0] step:441/10000 train_time:47954ms step_avg:108.74ms +[2025-09-05 16:15:43] [Rank 0] step:461/10000 train_time:48679ms step_avg:105.59ms +[2025-09-05 16:15:43] [Rank 0] step:461/10000 train_time:48679ms step_avg:105.59ms +[2025-09-05 16:15:44] [Rank 0] step:481/10000 train_time:49404ms step_avg:102.71ms +[2025-09-05 16:15:44] [Rank 0] step:481/10000 train_time:49404ms step_avg:102.71ms +[2025-09-05 16:15:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 16:15:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 16:15:45] [Rank 0] PRINT: step:500/10000 train_loss:3.6155 val_loss:2.4061 train_time:50208ms step_avg:100.42ms +[2025-09-05 16:15:45] [Rank 0] PRINT: step:500/10000 train_loss:3.6155 val_loss:2.4061 train_time:50208ms step_avg:100.42ms +[2025-09-05 16:15:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:15:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:15:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:15:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:17:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 16:17:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 16:17:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 16:17:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 16:17:06] [Rank 0] Total Loss: 5.0680 +[2025-09-05 16:17:06] [Rank 0] Total Loss: 5.0680 +[2025-09-05 16:17:06] [Rank 0] Total FTA (Unweighted): 0.2288 +[2025-09-05 16:17:06] [Rank 0] Total FTA (Unweighted): 0.2288 +[2025-09-05 16:17:06] [Rank 0] Total FTA (Weighted): 0.2288 +[2025-09-05 16:17:06] [Rank 0] Total FTA (Weighted): 0.2288 +[2025-09-05 16:17:06] [Rank 0] Group 0 Loss: 3.4834 +[2025-09-05 16:17:06] [Rank 0] Group 0 Loss: 3.4834 +[2025-09-05 16:17:06] [Rank 0] Group 1 Loss: 3.4689 +[2025-09-05 16:17:06] [Rank 0] Group 1 Loss: 3.4689 +[2025-09-05 16:17:06] [Rank 0] Group 2 Loss: 3.5493 +[2025-09-05 16:17:06] [Rank 0] Group 2 Loss: 3.5493 +[2025-09-05 16:17:06] [Rank 0] Group 3 Loss: 4.0034 +[2025-09-05 16:17:06] [Rank 0] Group 3 Loss: 4.0034 +[2025-09-05 16:17:06] [Rank 0] Group 4 Loss: 4.3634 +[2025-09-05 16:17:06] [Rank 0] Group 4 Loss: 4.3634 +[2025-09-05 16:17:06] [Rank 0] Group 5 Loss: 4.8147 +[2025-09-05 16:17:06] [Rank 0] Group 5 Loss: 4.8147 +[2025-09-05 16:17:06] [Rank 0] Group 6 Loss: 5.1905 +[2025-09-05 16:17:06] [Rank 0] Group 6 Loss: 5.1905 +[2025-09-05 16:17:06] [Rank 0] Group 7 Loss: 5.3605 +[2025-09-05 16:17:06] [Rank 0] Group 7 Loss: 5.3605 +[2025-09-05 16:17:06] [Rank 0] Group 8 Loss: 5.6761 +[2025-09-05 16:17:06] [Rank 0] Group 8 Loss: 5.6761 +[2025-09-05 16:17:06] [Rank 0] Group 9 Loss: 5.8310 +[2025-09-05 16:17:06] [Rank 0] Group 9 Loss: 5.8310 +[2025-09-05 16:17:06] [Rank 0] Group 10 Loss: 5.9381 +[2025-09-05 16:17:06] [Rank 0] Group 10 Loss: 5.9381 +[2025-09-05 16:17:06] [Rank 0] Group 11 Loss: 5.9830 +[2025-09-05 16:17:06] [Rank 0] Group 11 Loss: 5.9830 +[2025-09-05 16:17:06] [Rank 0] Group 12 Loss: 5.8479 +[2025-09-05 16:17:06] [Rank 0] Group 12 Loss: 5.8479 
+[2025-09-05 16:17:06] [Rank 0] Group 13 Loss: 5.8354
+[2025-09-05 16:17:06] [Rank 0] Group 14 Loss: 5.9177
+[2025-09-05 16:17:07] [Rank 0] Group 15 Loss: 5.8244
+[2025-09-05 16:17:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:17:07] [Rank 0] Group 1 FTA: 0.8300
+[2025-09-05 16:17:07] [Rank 0] Group 2 FTA: 0.1800
+[2025-09-05 16:17:07] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 16:17:07] [Rank 0] Group 4 FTA: 0.1500
+[2025-09-05 16:17:07] [Rank 0] Group 5 FTA: 0.2000
+[2025-09-05 16:17:07] [Rank 0] Group 6 FTA: 0.1500
+[2025-09-05 16:17:07] [Rank 0] Group 7 FTA: 0.1000
+[2025-09-05 16:17:07] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 16:17:07] [Rank 0] Group 9 FTA: 0.1000
+[2025-09-05 16:17:07] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 16:17:07] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 16:17:07] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 16:17:07] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 16:17:07] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 16:17:07] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 16:17:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:17:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:17:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:17:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:17:09] [Rank 0] step:501/10000 train_time:50217ms step_avg:100.23ms
+[2025-09-05 16:17:09] [Rank 0] step:521/10000 train_time:50877ms step_avg:97.65ms
+[2025-09-05 16:17:10] [Rank 0] step:541/10000 train_time:51602ms step_avg:95.38ms
+[2025-09-05 16:17:11] [Rank 0] step:561/10000 train_time:52326ms step_avg:93.27ms
+[2025-09-05 16:17:12] [Rank 0] step:581/10000 train_time:53203ms step_avg:91.57ms
+[2025-09-05 16:17:12] [Rank 0] step:601/10000 train_time:54034ms step_avg:89.91ms
+[2025-09-05 16:17:13] [Rank 0] step:621/10000 train_time:54759ms step_avg:88.18ms
+[2025-09-05 16:17:14] [Rank 0] step:641/10000 train_time:55605ms step_avg:86.75ms
+[2025-09-05 16:17:15] [Rank 0] step:661/10000 train_time:56329ms step_avg:85.22ms
+[2025-09-05 16:17:15] [Rank 0] step:681/10000 train_time:57054ms step_avg:83.78ms
+[2025-09-05 16:17:16] [Rank 0] step:701/10000 train_time:57778ms step_avg:82.42ms
+[2025-09-05 16:17:17] [Rank 0] step:721/10000 train_time:58503ms step_avg:81.14ms
+[2025-09-05 16:17:18] [Rank 0] step:741/10000 train_time:59330ms step_avg:80.07ms
+[2025-09-05 16:17:18] [Rank 0] step:761/10000 train_time:60059ms step_avg:78.92ms
+[2025-09-05 16:17:19] [Rank 0] step:781/10000 train_time:60788ms step_avg:77.83ms
+[2025-09-05 16:17:20] [Rank 0] step:801/10000 train_time:61517ms step_avg:76.80ms
+[2025-09-05 16:17:21] [Rank 0] step:821/10000 train_time:62855ms step_avg:76.56ms
+[2025-09-05 16:17:22] [Rank 0] step:841/10000 train_time:63584ms step_avg:75.61ms
+[2025-09-05 16:17:23] [Rank 0] step:861/10000 train_time:64313ms step_avg:74.70ms
+[2025-09-05 16:17:23] [Rank 0] step:881/10000 train_time:65043ms step_avg:73.83ms
+[2025-09-05 16:17:24] [Rank 0] step:901/10000 train_time:65772ms step_avg:73.00ms
+[2025-09-05 16:17:25] [Rank 0] step:921/10000 train_time:66502ms step_avg:72.21ms
+[2025-09-05 16:17:26] [Rank 0] step:941/10000 train_time:67232ms step_avg:71.45ms
+[2025-09-05 16:17:26] [Rank 0] step:961/10000 train_time:67962ms step_avg:70.72ms
+[2025-09-05 16:17:27] [Rank 0] step:981/10000 train_time:68691ms step_avg:70.02ms
+[2025-09-05 16:17:28] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:17:28] [Rank 0] PRINT: step:1000/10000 train_loss:2.0997 val_loss:1.8758 train_time:69501ms step_avg:69.50ms
+[2025-09-05 16:17:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:17:28] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:18:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:18:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:18:50] [Rank 0] Total Loss: 4.5158
+[2025-09-05 16:18:50] [Rank 0] Total FTA (Unweighted): 0.3362
+[2025-09-05 16:18:50] [Rank 0] Total FTA (Weighted): 0.3362
+[2025-09-05 16:18:50] [Rank 0] Group 0 Loss: 3.2985
+[2025-09-05 16:18:50] [Rank 0] Group 1 Loss: 3.2772
+[2025-09-05 16:18:50] [Rank 0] Group 2 Loss: 3.1296
+[2025-09-05 16:18:50] [Rank 0] Group 3 Loss: 3.5794
+[2025-09-05 16:18:50] [Rank 0] Group 4 Loss: 3.8897
+[2025-09-05 16:18:50] [Rank 0] Group 5 Loss: 4.0845
+[2025-09-05 16:18:50] [Rank 0] Group 6 Loss: 4.4380
+[2025-09-05 16:18:50] [Rank 0] Group 7 Loss: 4.7023
+[2025-09-05 16:18:50] [Rank 0] Group 8 Loss: 4.9857
+[2025-09-05 16:18:50] [Rank 0] Group 9 Loss: 5.1240
+[2025-09-05 16:18:50] [Rank 0] Group 10 Loss: 5.2863
+[2025-09-05 16:18:50] [Rank 0] Group 11 Loss: 5.3245
+[2025-09-05 16:18:50] [Rank 0] Group 12 Loss: 5.2443
+[2025-09-05 16:18:50] [Rank 0] Group 13 Loss: 5.2913
+[2025-09-05 16:18:50] [Rank 0] Group 14 Loss: 5.2951
+[2025-09-05 16:18:50] [Rank 0] Group 15 Loss: 5.3020
+[2025-09-05 16:18:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:18:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:18:50] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:18:50] [Rank 0] Group 3 FTA: 0.3600
+[2025-09-05 16:18:50] [Rank 0] Group 4 FTA: 0.3200
+[2025-09-05 16:18:50] [Rank 0] Group 5 FTA: 0.3000
+[2025-09-05 16:18:50] [Rank 0] Group 6 FTA: 0.3100
+[2025-09-05 16:18:50] [Rank 0] Group 7 FTA: 0.1500
+[2025-09-05 16:18:50] [Rank 0] Group 8 FTA: 0.2300
+[2025-09-05 16:18:50] [Rank 0] Group 9 FTA: 0.1300
+[2025-09-05 16:18:50] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 16:18:50] [Rank 0] Group 11 FTA: 0.0900
+[2025-09-05 16:18:50] [Rank 0] Group 12 FTA: 0.0900
+[2025-09-05 16:18:50] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 16:18:50] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 16:18:50] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 16:18:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:18:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:18:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:18:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:18:52] [Rank 0] step:1001/10000 train_time:69510ms step_avg:69.44ms
+[2025-09-05 16:18:52] [Rank 0] step:1021/10000 train_time:70187ms step_avg:68.74ms
+[2025-09-05 16:18:53] [Rank 0] step:1041/10000 train_time:70916ms step_avg:68.12ms
+[2025-09-05 16:18:54] [Rank 0] step:1061/10000 train_time:71646ms step_avg:67.53ms
+[2025-09-05 16:18:54] [Rank 0] step:1081/10000 train_time:72376ms step_avg:66.95ms
+[2025-09-05 16:18:55] [Rank 0] step:1101/10000 train_time:73105ms step_avg:66.40ms
+[2025-09-05 16:18:56] [Rank 0] step:1121/10000 train_time:73835ms step_avg:65.86ms
+[2025-09-05 16:18:57] [Rank 0] step:1141/10000 train_time:74564ms step_avg:65.35ms
+[2025-09-05 16:18:57] [Rank 0] step:1161/10000 train_time:75295ms step_avg:64.85ms
+[2025-09-05 16:18:58] [Rank 0] step:1181/10000 train_time:76024ms step_avg:64.37ms
+[2025-09-05 16:18:59] [Rank 0] step:1201/10000 train_time:76754ms step_avg:63.91ms
+[2025-09-05 16:19:00] [Rank 0] step:1221/10000 train_time:77483ms step_avg:63.46ms
+[2025-09-05 16:19:00] [Rank 0] step:1241/10000 train_time:78212ms step_avg:63.02ms
+[2025-09-05 16:19:01] [Rank 0] step:1261/10000 train_time:78942ms step_avg:62.60ms
+[2025-09-05 16:19:02] [Rank 0] step:1281/10000 train_time:79672ms step_avg:62.20ms
+[2025-09-05 16:19:03] [Rank 0] step:1301/10000 train_time:80402ms step_avg:61.80ms
+[2025-09-05 16:19:03] [Rank 0] step:1321/10000 train_time:81132ms step_avg:61.42ms
+[2025-09-05 16:19:04] [Rank 0] step:1341/10000 train_time:81861ms step_avg:61.04ms
+[2025-09-05 16:19:05] [Rank 0] step:1361/10000 train_time:82591ms step_avg:60.68ms
+[2025-09-05 16:19:05] [Rank 0] step:1381/10000 train_time:83321ms step_avg:60.33ms
+[2025-09-05 16:19:06] [Rank 0] step:1401/10000 train_time:84051ms step_avg:59.99ms
+[2025-09-05 16:19:07] [Rank 0] step:1421/10000 train_time:84781ms step_avg:59.66ms
+[2025-09-05 16:19:08] [Rank 0] step:1441/10000 train_time:85510ms step_avg:59.34ms
+[2025-09-05 16:19:08] [Rank 0] step:1461/10000 train_time:86239ms step_avg:59.03ms
+[2025-09-05 16:19:09] [Rank 0] step:1481/10000 train_time:86969ms step_avg:58.72ms
+[2025-09-05 16:19:10] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:19:10] [Rank 0] PRINT: step:1500/10000 train_loss:1.7703 val_loss:1.6784 train_time:87779ms step_avg:58.52ms
+[2025-09-05 16:19:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:19:10] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:20:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:20:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:20:32] [Rank 0] Total Loss: 4.3022
+[2025-09-05 16:20:32] [Rank 0] Total FTA (Unweighted): 0.3900
+[2025-09-05 16:20:32] [Rank 0] Total FTA (Weighted): 0.3900
+[2025-09-05 16:20:32] [Rank 0] Group 0 Loss: 3.1831
+[2025-09-05 16:20:32] [Rank 0] Group 1 Loss: 3.1577
+[2025-09-05 16:20:32] [Rank 0] Group 2 Loss: 3.1515
+[2025-09-05 16:20:32] [Rank 0] Group 3 Loss: 3.4475
+[2025-09-05 16:20:32] [Rank 0] Group 4 Loss: 3.6674
+[2025-09-05 16:20:32] [Rank 0] Group 5 Loss: 3.9462
+[2025-09-05 16:20:32] [Rank 0] Group 6 Loss: 4.1137
+[2025-09-05 16:20:32] [Rank 0] Group 7 Loss: 4.3748
+[2025-09-05 16:20:32] [Rank 0] Group 8 Loss: 4.6985
+[2025-09-05 16:20:32] [Rank 0] Group 9 Loss: 4.8412
+[2025-09-05 16:20:32] [Rank 0] Group 10 Loss: 5.0041
+[2025-09-05 16:20:32] [Rank 0] Group 11 Loss: 5.0679
+[2025-09-05 16:20:32] [Rank 0] Group 12 Loss: 4.9650
+[2025-09-05 16:20:32] [Rank 0] Group 13 Loss: 5.0575
+[2025-09-05 16:20:32] [Rank 0] Group 14 Loss: 5.1042
+[2025-09-05 16:20:32] [Rank 0] Group 15 Loss: 5.0556
+[2025-09-05 16:20:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:20:32] [Rank 0] Group 3 FTA: 0.7300
+[2025-09-05 16:20:32] [Rank 0] Group 4 FTA: 0.4400
+[2025-09-05 16:20:32] [Rank 0] Group 5 FTA: 0.4000
+[2025-09-05 16:20:32] [Rank 0] Group 6 FTA: 0.3500
+[2025-09-05 16:20:32] [Rank 0] Group 7 FTA: 0.2700
+[2025-09-05 16:20:32] [Rank 0] Group 8 FTA: 0.2500
+[2025-09-05 16:20:32] [Rank 0] Group 9 FTA: 0.2000
+[2025-09-05 16:20:32] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 16:20:32] [Rank 0] Group 11 FTA: 0.0600
+[2025-09-05 16:20:32] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 16:20:32] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 16:20:32] [Rank 0] Group 14 FTA: 0.0900
+[2025-09-05 16:20:32] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 16:20:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:20:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:20:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:20:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:20:33] [Rank 0] step:1501/10000 train_time:87788ms step_avg:58.49ms
+[2025-09-05 16:20:34] [Rank 0] step:1521/10000 train_time:88444ms step_avg:58.15ms
+[2025-09-05 16:20:35] [Rank 0] step:1541/10000 train_time:89174ms step_avg:57.87ms
+[2025-09-05 16:20:35] [Rank 0] step:1561/10000 train_time:89904ms step_avg:57.59ms
+[2025-09-05 16:20:36] [Rank 0] step:1581/10000 train_time:90634ms step_avg:57.33ms
+[2025-09-05 16:20:37] [Rank 0] step:1601/10000 train_time:91364ms step_avg:57.07ms
+[2025-09-05 16:20:38] [Rank 0] step:1621/10000 train_time:92094ms step_avg:56.81ms
+[2025-09-05 16:20:39] [Rank 0] step:1641/10000 train_time:93448ms step_avg:56.95ms
+[2025-09-05 16:20:40] [Rank 0] step:1661/10000 train_time:94178ms step_avg:56.70ms
+[2025-09-05 16:20:40] [Rank 0] step:1681/10000 train_time:94908ms step_avg:56.46ms
+[2025-09-05 16:20:41] [Rank 0] step:1701/10000 train_time:95638ms step_avg:56.22ms
+[2025-09-05 16:20:42] [Rank 0] step:1721/10000 train_time:96367ms step_avg:55.99ms
+[2025-09-05 16:20:43] [Rank 0] step:1741/10000 train_time:97097ms step_avg:55.77ms
+[2025-09-05 16:20:43] [Rank 0] step:1761/10000 train_time:97827ms step_avg:55.55ms
+[2025-09-05 16:20:44] [Rank 0] step:1781/10000 train_time:98557ms step_avg:55.34ms
+[2025-09-05 16:20:45] [Rank 0] step:1801/10000 train_time:99287ms step_avg:55.13ms
+[2025-09-05 16:20:46] [Rank 0] step:1821/10000 train_time:100017ms step_avg:54.92ms
+[2025-09-05 16:20:46] [Rank 0] step:1841/10000 train_time:100748ms step_avg:54.72ms
+[2025-09-05 16:20:47] [Rank 0] step:1861/10000 train_time:101478ms step_avg:54.53ms
+[2025-09-05 16:20:48] [Rank 0] step:1881/10000 train_time:102208ms step_avg:54.34ms
+[2025-09-05 16:20:48] [Rank 0] step:1901/10000 train_time:102938ms step_avg:54.15ms
+[2025-09-05 16:20:49] [Rank 0] step:1921/10000 train_time:103667ms step_avg:53.97ms
+[2025-09-05 16:20:50] [Rank 0] step:1941/10000 train_time:104397ms step_avg:53.79ms
+[2025-09-05 16:20:51] [Rank 0] step:1961/10000 train_time:105127ms step_avg:53.61ms
+[2025-09-05 16:20:51] [Rank 0] step:1981/10000 train_time:105857ms step_avg:53.44ms
+[2025-09-05 16:20:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:20:53] [Rank 0] PRINT: step:2000/10000 train_loss:1.6242 val_loss:1.5710 train_time:106666ms step_avg:53.33ms
+[2025-09-05 16:20:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:20:53] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:22:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:22:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:22:14] [Rank 0] Total Loss: 4.1765
+[2025-09-05 16:22:14] [Rank 0] Total FTA (Unweighted): 0.4256
+[2025-09-05 16:22:14] [Rank 0] Total FTA (Weighted): 0.4256
+[2025-09-05 16:22:14] [Rank 0] Group 0 Loss: 3.2885
+[2025-09-05 16:22:14] [Rank 0] Group 1 Loss: 3.0447
+[2025-09-05 16:22:14] [Rank 0] Group 2 Loss: 3.0558
+[2025-09-05 16:22:14] [Rank 0] Group 3 Loss: 3.4335
+[2025-09-05 16:22:14] [Rank 0] Group 4 Loss: 3.6173
+[2025-09-05 16:22:14] [Rank 0] Group 5 Loss: 3.8662
+[2025-09-05 16:22:15] [Rank 0] Group 6 Loss: 4.0691
+[2025-09-05 16:22:15] [Rank 0] Group 7 Loss: 4.2057
+[2025-09-05 16:22:15] [Rank 0] Group 8 Loss: 4.5152
+[2025-09-05 16:22:15] [Rank 0] Group 9 Loss: 4.6217
+[2025-09-05 16:22:15] [Rank 0] Group 10 Loss: 4.8514
+[2025-09-05 16:22:15] [Rank 0] Group 11 Loss: 4.8630
+[2025-09-05 16:22:15] [Rank 0] Group 12 Loss: 4.7734
+[2025-09-05 16:22:15] [Rank 0] Group 13 Loss: 4.8533
+[2025-09-05 16:22:15] [Rank 0] Group 14 Loss: 4.8703
+[2025-09-05 16:22:15] [Rank 0] Group 15 Loss: 4.8944
+[2025-09-05 16:22:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:22:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:22:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:22:15] [Rank 0] Group 3 FTA: 0.9700
+[2025-09-05 16:22:15] [Rank 0] Group 4 FTA: 0.4100
+[2025-09-05 16:22:15] [Rank 0] Group 5 FTA: 0.4900
+[2025-09-05 16:22:15] [Rank 0] Group 6 FTA: 0.3900
+[2025-09-05 16:22:15] [Rank 0] Group 7 FTA: 0.3200
+[2025-09-05 16:22:15] [Rank 0] Group 8 FTA: 0.3100
+[2025-09-05 16:22:15] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 16:22:15] [Rank 0] Group 10 FTA: 0.1100
+[2025-09-05 16:22:15] [Rank 0] Group 11 FTA: 0.1200
+[2025-09-05 16:22:15] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 16:22:15] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 16:22:15] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 16:22:15] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 16:22:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:22:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:22:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:22:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:22:16] [Rank 0] step:2001/10000 train_time:106675ms step_avg:53.31ms
+[2025-09-05 16:22:17] [Rank 0] step:2021/10000 train_time:107543ms step_avg:53.21ms
+[2025-09-05 16:22:18] [Rank 0] step:2041/10000 train_time:108273ms step_avg:53.05ms
+[2025-09-05 16:22:19] [Rank 0] step:2061/10000 train_time:109003ms step_avg:52.89ms
+[2025-09-05 16:22:19] [Rank 0] step:2081/10000 train_time:109732ms step_avg:52.73ms
+[2025-09-05 16:22:20] [Rank 0] step:2101/10000 train_time:110461ms step_avg:52.58ms
+[2025-09-05 16:22:21] [Rank 0] step:2121/10000 train_time:111191ms step_avg:52.42ms
+[2025-09-05 16:22:22] [Rank 0] step:2141/10000 train_time:111921ms step_avg:52.28ms
+[2025-09-05 16:22:22] [Rank 0] step:2161/10000 train_time:112651ms step_avg:52.13ms
+[2025-09-05 16:22:23] [Rank 0] step:2181/10000 train_time:113382ms step_avg:51.99ms
+[2025-09-05 16:22:24] [Rank 0] step:2201/10000 train_time:114112ms step_avg:51.85ms
+[2025-09-05 16:22:25] [Rank 0] step:2221/10000 train_time:114842ms step_avg:51.71ms
+[2025-09-05 16:22:25] [Rank 0] step:2241/10000 train_time:115576ms step_avg:51.57ms
+[2025-09-05 16:22:26] [Rank 0] step:2261/10000 train_time:116311ms step_avg:51.44ms
+[2025-09-05 16:22:27] [Rank 0] step:2281/10000 train_time:117047ms step_avg:51.31ms
+[2025-09-05 16:22:28] [Rank 0] step:2301/10000 train_time:117782ms step_avg:51.19ms
+[2025-09-05 16:22:28] [Rank 0] step:2321/10000 train_time:118667ms step_avg:51.13ms
+[2025-09-05 16:22:29] [Rank 0] step:2341/10000 train_time:119403ms step_avg:51.01ms
+[2025-09-05 16:22:30] [Rank 0] step:2361/10000 train_time:120139ms step_avg:50.88ms
+[2025-09-05 16:22:31] [Rank 0] step:2381/10000 train_time:121020ms step_avg:50.83ms
+[2025-09-05 16:22:31] [Rank 0] step:2401/10000 train_time:121755ms step_avg:50.71ms
+[2025-09-05 16:22:32] [Rank 0] step:2421/10000 train_time:122491ms step_avg:50.60ms
+[2025-09-05 16:22:33] [Rank 0] step:2441/10000 train_time:123228ms step_avg:50.48ms
+[2025-09-05 16:22:34] [Rank 0] step:2461/10000 train_time:123963ms step_avg:50.37ms
+[2025-09-05 16:22:34] [Rank 0] step:2481/10000 train_time:124699ms step_avg:50.26ms
+[2025-09-05 16:22:35] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:22:36] [Rank 0] PRINT: step:2500/10000 train_loss:1.5402 val_loss:1.4907 train_time:125516ms step_avg:50.21ms
+[2025-09-05 16:22:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:22:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:23:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:23:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:23:57] [Rank 0] Total Loss: 4.2122
+[2025-09-05 16:23:57] [Rank 0] Total FTA (Unweighted): 0.4525
+[2025-09-05 16:23:57] [Rank 0] Total FTA (Weighted): 0.4525
+[2025-09-05 16:23:57] [Rank 0] Group 0 Loss: 3.4472
+[2025-09-05 16:23:57] [Rank 0] Group 1 Loss: 3.1596
+[2025-09-05 16:23:57] [Rank 0] Group 2 Loss: 3.1664
+[2025-09-05 16:23:57] [Rank 0] Group 3 Loss: 3.5476
+[2025-09-05 16:23:57] [Rank 0] Group 4 Loss: 3.6839
+[2025-09-05 16:23:57] [Rank 0] Group 5 Loss: 3.8882
+[2025-09-05 16:23:57] [Rank 0] Group 6 Loss: 4.0022
+[2025-09-05 16:23:57] [Rank 0] Group 7 Loss: 4.2256
+[2025-09-05 16:23:57] [Rank 0] Group 8 Loss: 4.5404
+[2025-09-05 16:23:57] [Rank 0] Group 9 Loss: 4.6435
+[2025-09-05 16:23:57] [Rank 0] Group 10 Loss: 4.8422
+[2025-09-05 16:23:57] [Rank 0] Group 11 Loss: 4.8456
+[2025-09-05 16:23:57] [Rank 0] Group 12 Loss: 4.7773
+[2025-09-05 16:23:57] [Rank 0] Group 13 Loss: 4.8783
+[2025-09-05 16:23:57] [Rank 0] Group 14 Loss: 4.8802
+[2025-09-05 16:23:57] [Rank 0] Group 15 Loss: 4.8666
+[2025-09-05 16:23:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:23:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:23:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:23:58] [Rank 0] Group 3 FTA: 0.9700
+[2025-09-05 16:23:58] [Rank 0] Group 4 FTA: 0.5100
+[2025-09-05 16:23:58] [Rank 0] Group 5 FTA: 0.5300
+[2025-09-05 16:23:58] [Rank 0] Group 6 FTA: 0.4200
+[2025-09-05 16:23:58] [Rank 0] Group 7 FTA: 0.3700
+[2025-09-05 16:23:58] [Rank 0] Group 8 FTA: 0.3500
+[2025-09-05 16:23:58] [Rank 0] Group 9 FTA: 0.2800
+[2025-09-05 16:23:58] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 16:23:58] [Rank 0] Group 11 FTA: 0.1500
+[2025-09-05 16:23:58] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 16:23:58] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 16:23:58] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:23:58] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:23:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:23:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:23:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:23:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:23:59] [Rank 0] step:2501/10000 train_time:125525ms step_avg:50.19ms
+[2025-09-05 16:24:00] [Rank 0] step:2521/10000 train_time:126197ms step_avg:50.06ms
+[2025-09-05 16:24:00] [Rank 0] step:2541/10000 train_time:126933ms step_avg:49.95ms
+[2025-09-05 16:24:01] [Rank 0] step:2561/10000 train_time:127669ms step_avg:49.85ms
+[2025-09-05 16:24:02] [Rank 0] step:2581/10000 train_time:128405ms step_avg:49.75ms
+[2025-09-05 16:24:03] [Rank 0] step:2601/10000 train_time:129145ms step_avg:49.65ms
+[2025-09-05 16:24:03] [Rank 0] step:2621/10000 train_time:129881ms step_avg:49.55ms
+[2025-09-05 16:24:04] [Rank 0] step:2641/10000 train_time:130616ms step_avg:49.46ms
+[2025-09-05 16:24:05] [Rank 0] step:2661/10000 train_time:131352ms step_avg:49.36ms
+[2025-09-05 16:24:06] [Rank 0] step:2681/10000 train_time:132088ms step_avg:49.27ms
+[2025-09-05 16:24:06] [Rank 0] step:2701/10000 train_time:132824ms step_avg:49.18ms
+[2025-09-05 16:24:07] [Rank 0] step:2721/10000 train_time:133560ms step_avg:49.08ms
+[2025-09-05 16:24:08] [Rank 0] step:2741/10000 train_time:134295ms step_avg:49.00ms
+[2025-09-05 16:24:09] [Rank 0] step:2761/10000 train_time:135030ms step_avg:48.91ms
+[2025-09-05 16:24:09] [Rank 0] step:2781/10000 train_time:135767ms step_avg:48.82ms
+[2025-09-05 16:24:10] [Rank 0] step:2801/10000 train_time:136503ms step_avg:48.73ms
+[2025-09-05 16:24:11] [Rank 0] step:2821/10000 train_time:137856ms step_avg:48.87ms
+[2025-09-05 16:24:12] [Rank 0] step:2841/10000 train_time:138592ms step_avg:48.78ms
+[2025-09-05 16:24:13] [Rank 0] step:2861/10000 train_time:139327ms step_avg:48.70ms
+[2025-09-05 16:24:14] [Rank 0] step:2881/10000 train_time:140063ms step_avg:48.62ms
+[2025-09-05 16:24:14] [Rank 0] step:2901/10000 train_time:140798ms step_avg:48.53ms
+[2025-09-05 16:24:15] [Rank 0] step:2921/10000 train_time:141533ms step_avg:48.45ms
+[2025-09-05 16:24:16] [Rank 0] step:2941/10000 train_time:142269ms step_avg:48.37ms
+[2025-09-05 16:24:17] [Rank 0] step:2961/10000 train_time:143005ms step_avg:48.30ms
+[2025-09-05 16:24:17] [Rank 0] step:2981/10000 train_time:143740ms step_avg:48.22ms
+[2025-09-05 16:24:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:24:18] [Rank 0] PRINT: step:3000/10000 train_loss:1.4824 val_loss:1.4583 train_time:144558ms step_avg:48.19ms
+[2025-09-05 16:24:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:24:19] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:25:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:25:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:25:40] [Rank 0] Total Loss: 4.2716
+[2025-09-05 16:25:40] [Rank 0] Total FTA (Unweighted): 0.4706
+[2025-09-05 16:25:40] [Rank 0] Total FTA (Weighted): 0.4706
+[2025-09-05 16:25:40] [Rank 0] Group 0 Loss: 3.3879
+[2025-09-05 16:25:40] [Rank 0] Group 1 Loss: 3.2746
+[2025-09-05 16:25:40] [Rank 0] Group 2 Loss: 3.2054
+[2025-09-05 16:25:40] [Rank 0] Group 3 Loss: 3.5555
+[2025-09-05 16:25:40] [Rank 0] Group 4 Loss: 3.7967
+[2025-09-05 16:25:40] [Rank 0] Group 5 Loss: 3.9663
+[2025-09-05 16:25:40] [Rank 0] Group 6 Loss: 4.0865
+[2025-09-05 16:25:40] [Rank 0] Group 7 Loss: 4.3049
+[2025-09-05 16:25:40] [Rank 0] Group 8 Loss: 4.5793
+[2025-09-05 16:25:40] [Rank 0] Group 9 Loss: 4.6952
+[2025-09-05 16:25:40] [Rank 0] Group 10 Loss: 4.9119
+[2025-09-05 16:25:40] [Rank 0] Group 11 Loss: 4.9098
+[2025-09-05 16:25:40] [Rank 0] Group 12 Loss: 4.8584
+[2025-09-05 16:25:40] [Rank 0] Group 13 Loss: 4.9161
+[2025-09-05 16:25:40] [Rank 0] Group 14 Loss: 4.9367
+[2025-09-05 16:25:40] [Rank 0] Group 15 Loss: 4.9608
+[2025-09-05 16:25:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:25:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:25:40] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:25:40] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:25:40] [Rank 0] Group 4 FTA: 0.5800
+[2025-09-05 16:25:40] [Rank 0] Group 5 FTA: 0.5400
+[2025-09-05 16:25:40] [Rank 0] Group 6 FTA: 0.4400
+[2025-09-05 16:25:40] [Rank 0] Group 7 FTA: 0.4000
+[2025-09-05 16:25:40] [Rank 0] Group 8 FTA: 0.3800
+[2025-09-05 16:25:40] [Rank 0] Group 9 FTA: 0.2900
+[2025-09-05 16:25:40] [Rank 0] Group 10 FTA: 0.2900
+[2025-09-05 16:25:40] [Rank 0] Group 11 FTA: 0.1400
+[2025-09-05 16:25:40] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 16:25:40] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 16:25:40] [Rank 0] Group 14 FTA: 0.1100
+[2025-09-05 16:25:40] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:25:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:25:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:25:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:25:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:25:42] [Rank 0] step:3001/10000 train_time:144567ms step_avg:48.17ms
+[2025-09-05 16:25:42] [Rank 0] step:3021/10000 train_time:145235ms step_avg:48.07ms
+[2025-09-05 16:25:43] [Rank 0] step:3041/10000 train_time:145970ms step_avg:48.00ms
+[2025-09-05 16:25:44] [Rank 0] step:3061/10000 train_time:146706ms step_avg:47.93ms
+[2025-09-05 16:25:45] [Rank 0] step:3081/10000 train_time:147442ms step_avg:47.86ms
+[2025-09-05 16:25:45] [Rank 0] step:3101/10000 train_time:148177ms step_avg:47.78ms
+[2025-09-05 16:25:46] [Rank 0] step:3121/10000 train_time:148913ms step_avg:47.71ms
+[2025-09-05 16:25:47] [Rank 0] step:3141/10000 train_time:149650ms step_avg:47.64ms
+[2025-09-05 16:25:48] [Rank 0] step:3161/10000 train_time:150489ms step_avg:47.61ms
+[2025-09-05 16:25:48] [Rank 0] step:3181/10000 train_time:151224ms step_avg:47.54ms
+[2025-09-05 16:25:49] [Rank 0] step:3201/10000 train_time:151961ms step_avg:47.47ms
+[2025-09-05 16:25:50] [Rank 0] step:3221/10000 train_time:152696ms step_avg:47.41ms
+[2025-09-05 16:25:51] [Rank 0] step:3241/10000 train_time:153532ms step_avg:47.37ms
+[2025-09-05 16:25:51] [Rank 0] step:3261/10000 train_time:154269ms step_avg:47.31ms
+[2025-09-05 16:25:52] [Rank 0] step:3281/10000 train_time:155004ms step_avg:47.24ms
+[2025-09-05 16:25:53] [Rank 0] step:3301/10000 train_time:155741ms step_avg:47.18ms
+[2025-09-05 16:25:54] [Rank 0] step:3321/10000 train_time:156476ms step_avg:47.12ms
+[2025-09-05 16:25:54] [Rank 0] step:3341/10000 train_time:157212ms step_avg:47.06ms
+[2025-09-05 16:25:55] [Rank 0] step:3361/10000 train_time:157948ms step_avg:46.99ms
+[2025-09-05 16:25:56] [Rank 0] step:3381/10000 train_time:158684ms step_avg:46.93ms
+[2025-09-05 16:25:57] [Rank 0] step:3401/10000 train_time:159421ms step_avg:46.87ms
+[2025-09-05 16:25:57] [Rank 0] step:3421/10000 train_time:160156ms step_avg:46.82ms
+[2025-09-05 16:25:58] [Rank 0] step:3441/10000 train_time:160892ms step_avg:46.76ms
+[2025-09-05 16:25:59] [Rank 0] step:3461/10000 train_time:161627ms step_avg:46.70ms
+[2025-09-05 16:26:00] [Rank 0] step:3481/10000 train_time:162362ms step_avg:46.64ms
+[2025-09-05 16:26:00] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:26:01] [Rank 0] PRINT: step:3500/10000 train_loss:1.4513 val_loss:1.4289 train_time:163179ms step_avg:46.62ms
+[2025-09-05 16:26:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:26:01] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:27:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:27:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:27:22] [Rank 0] Total Loss: 4.2576
+[2025-09-05 16:27:22] [Rank 0] Total FTA (Unweighted): 0.4825
+[2025-09-05 16:27:22] [Rank 0] Total FTA (Weighted): 0.4825
+[2025-09-05 16:27:22] [Rank 0] Group 0 Loss: 3.3630
+[2025-09-05 16:27:22] [Rank 0] Group 1 Loss: 3.2907
+[2025-09-05 16:27:22] [Rank 0] Group 2 Loss: 3.2251
+[2025-09-05 16:27:22] [Rank 0] Group 3 Loss: 3.5423
+[2025-09-05 16:27:22] [Rank 0] Group 4 Loss: 3.8070
+[2025-09-05 16:27:22] [Rank 0] Group 5 Loss: 3.9768
+[2025-09-05 16:27:22] [Rank 0] Group 6 Loss: 4.0538
+[2025-09-05 16:27:22] [Rank 0] Group 7 Loss: 4.3002
+[2025-09-05 16:27:22] [Rank 0] Group 8 Loss: 4.5798
+[2025-09-05 16:27:22] [Rank 0] Group 9 Loss: 4.6495
+[2025-09-05 16:27:22] [Rank 0] Group 10 Loss: 4.8681
+[2025-09-05 16:27:22] [Rank 0] Group 11 Loss: 4.8633
+[2025-09-05 16:27:22] [Rank 0] Group 12 Loss: 4.8204
+[2025-09-05 16:27:22] [Rank 0] Group 13 Loss: 4.8982
+[2025-09-05 16:27:22] [Rank 0] Group 14 Loss: 4.9441
+[2025-09-05 16:27:22] [Rank 0] Group 15 Loss: 4.9399
+[2025-09-05 16:27:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:27:22] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:27:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:27:22] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:27:22] [Rank 0] Group 4 FTA: 0.6500
+[2025-09-05 16:27:22] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 16:27:22] [Rank 0] Group 6 FTA: 0.4400
+[2025-09-05 16:27:22] [Rank 0] Group 7 FTA: 0.4100
+[2025-09-05 16:27:22] [Rank 0] Group 8 FTA: 0.4100
+[2025-09-05 16:27:22] [Rank 0] Group 9 FTA: 0.3000
+[2025-09-05 16:27:22] [Rank 0] Group 10 FTA: 0.2900
+[2025-09-05 16:27:22] [Rank 0] Group 11 FTA: 0.1600
+[2025-09-05 16:27:22] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 16:27:22] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 16:27:22] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:27:22] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 16:27:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:27:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:27:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:27:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:27:23] [Rank 0] step:3501/10000 train_time:163188ms step_avg:46.61ms
+[2025-09-05 16:27:24] [Rank 0] step:3521/10000 train_time:163872ms step_avg:46.54ms
+[2025-09-05 16:27:25] [Rank 0] step:3541/10000 train_time:164608ms step_avg:46.49ms
+[2025-09-05 16:27:26] [Rank 0] step:3561/10000 train_time:165343ms step_avg:46.43ms
+[2025-09-05 16:27:26] [Rank 0] step:3581/10000 train_time:166079ms step_avg:46.38ms
+[2025-09-05 16:27:27] [Rank 0] step:3601/10000 train_time:166816ms step_avg:46.32ms
+[2025-09-05 16:27:28] [Rank 0] step:3621/10000 train_time:167551ms step_avg:46.27ms
+[2025-09-05 16:27:29] [Rank 0] step:3641/10000 train_time:168897ms step_avg:46.39ms
+[2025-09-05 16:27:30] [Rank 0] step:3661/10000 train_time:169634ms step_avg:46.34ms
+[2025-09-05 16:27:31] [Rank 0] step:3681/10000 train_time:170369ms step_avg:46.28ms
+[2025-09-05 16:27:31] [Rank 0] step:3701/10000 train_time:171105ms step_avg:46.23ms
+[2025-09-05 16:27:32] [Rank 0] step:3721/10000 train_time:171841ms step_avg:46.18ms
+[2025-09-05 16:27:33] [Rank 0] step:3741/10000 train_time:172578ms step_avg:46.13ms
+[2025-09-05 16:27:34] [Rank 0] step:3761/10000 train_time:173313ms step_avg:46.08ms
+[2025-09-05 16:27:34] [Rank 0] step:3781/10000 train_time:174049ms step_avg:46.03ms
+[2025-09-05 16:27:35] [Rank 0] step:3801/10000 train_time:174784ms step_avg:45.98ms
+[2025-09-05 16:27:36] [Rank 0] step:3821/10000 train_time:175520ms step_avg:45.94ms
+[2025-09-05 16:27:37] [Rank 0] step:3841/10000 train_time:176256ms step_avg:45.89ms
+[2025-09-05 16:27:37] [Rank 0] step:3861/10000 train_time:176992ms step_avg:45.84ms
+[2025-09-05 16:27:38] [Rank 0] step:3881/10000 train_time:177727ms step_avg:45.79ms
+[2025-09-05 16:27:39] [Rank 0] step:3901/10000 train_time:178463ms step_avg:45.75ms
+[2025-09-05 16:27:40] [Rank 0] step:3921/10000 train_time:179200ms step_avg:45.70ms
+[2025-09-05 16:27:40] [Rank 0] step:3941/10000 train_time:179936ms step_avg:45.66ms
+[2025-09-05 16:27:41] [Rank 0] step:3961/10000 train_time:180671ms step_avg:45.61ms
+[2025-09-05 16:27:42] [Rank 0] step:3981/10000 train_time:181407ms step_avg:45.57ms
+[2025-09-05 16:27:42] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:27:43] [Rank 0] PRINT: step:4000/10000 train_loss:1.4307 val_loss:1.4152 train_time:182224ms step_avg:45.56ms
+[2025-09-05 16:27:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:27:43] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:29:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:29:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:29:05] [Rank 0] Total Loss: 4.2149
+[2025-09-05 16:29:05] [Rank 0] Total FTA (Unweighted): 0.5025
+[2025-09-05 16:29:05] [Rank 0] Total FTA (Weighted): 0.5025
+[2025-09-05 16:29:05] [Rank 0] Group 0 Loss: 3.5011
+[2025-09-05 16:29:05] [Rank 0] Group 1 Loss: 3.3354
+[2025-09-05 16:29:05] [Rank 0] Group 2 Loss: 3.2102
+[2025-09-05 16:29:05] [Rank 0] Group 3 Loss: 3.5467
+[2025-09-05 16:29:05] [Rank 0] Group 4 Loss: 3.7711
+[2025-09-05 16:29:05] [Rank 0] Group 5 Loss: 3.9315
+[2025-09-05 16:29:05] [Rank 0] Group 6 Loss: 4.0035
+[2025-09-05 16:29:05] [Rank 0] Group 7 Loss: 4.1967
+[2025-09-05 16:29:05] [Rank 0] Group 8 Loss: 4.4661
+[2025-09-05 16:29:05] [Rank 0] Group 9 Loss: 4.5917
+[2025-09-05 16:29:05] [Rank 0] Group 10 Loss: 4.7612
+[2025-09-05 16:29:05] [Rank 0] Group 11 Loss: 4.7631
+[2025-09-05 16:29:05] [Rank 0] Group 12 Loss: 4.7493
+[2025-09-05 16:29:05] [Rank 0] Group 13 Loss: 4.8410
+[2025-09-05 16:29:05] [Rank 0] Group 14 Loss: 4.8747
+[2025-09-05 16:29:05] [Rank 0] Group 15 Loss: 4.8957
+[2025-09-05 16:29:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:29:05] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:29:05] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:29:05] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:29:05] [Rank 0] Group 4 FTA: 0.7200
+[2025-09-05 16:29:05] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 16:29:05] [Rank 0] Group 6 FTA: 0.4900
+[2025-09-05 16:29:05] [Rank 0] Group 7 FTA: 0.4100
+[2025-09-05 16:29:05] [Rank 0] Group 8 FTA: 0.4300
+[2025-09-05 16:29:05] [Rank 0] Group 9 FTA: 0.3500
+[2025-09-05 16:29:05] [Rank 0] Group 10 FTA: 0.3700
+[2025-09-05 16:29:05] [Rank 0] Group 11 FTA: 0.2100
+[2025-09-05 16:29:05] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 16:29:05] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 16:29:05] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 16:29:05] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 16:29:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:29:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:29:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:29:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:29:06] [Rank 0] step:4001/10000 train_time:182234ms step_avg:45.55ms
+[2025-09-05 16:29:08] [Rank 0] step:4021/10000 train_time:183523ms step_avg:45.64ms
+[2025-09-05 16:29:08] [Rank 0] step:4041/10000 train_time:184260ms step_avg:45.60ms
+[2025-09-05 16:29:09] [Rank 0] step:4061/10000 train_time:184995ms step_avg:45.55ms
+[2025-09-05 16:29:10] [Rank 0] step:4081/10000 train_time:185730ms step_avg:45.51ms
+[2025-09-05 16:29:11] [Rank 0] step:4101/10000 train_time:186466ms step_avg:45.47ms
+[2025-09-05 16:29:11] [Rank 0] step:4121/10000 train_time:187202ms step_avg:45.43ms
+[2025-09-05 16:29:12] [Rank 0] step:4141/10000 train_time:187938ms step_avg:45.38ms
+[2025-09-05 16:29:13] [Rank 0] step:4161/10000 train_time:188673ms step_avg:45.34ms
+[2025-09-05 16:29:14] [Rank 0] step:4181/10000 train_time:189409ms step_avg:45.30ms
+[2025-09-05 16:29:14] [Rank 0] step:4201/10000 train_time:190144ms step_avg:45.26ms
+[2025-09-05 16:29:15] [Rank 0] step:4221/10000 train_time:190880ms step_avg:45.22ms
+[2025-09-05 16:29:16] [Rank 0] step:4241/10000 train_time:191616ms step_avg:45.18ms
+[2025-09-05 16:29:17] [Rank 0] step:4261/10000 train_time:192352ms step_avg:45.14ms
+[2025-09-05 16:29:17] [Rank 0] step:4281/10000 train_time:193088ms step_avg:45.10ms
+[2025-09-05 16:29:18] [Rank 0] step:4301/10000 train_time:193824ms step_avg:45.06ms
+[2025-09-05 16:29:19] [Rank 0] step:4321/10000 train_time:194560ms step_avg:45.03ms
+[2025-09-05 16:29:19] [Rank 0] step:4341/10000 train_time:195296ms step_avg:44.99ms
+[2025-09-05 16:29:20] [Rank 0] step:4361/10000 train_time:196032ms step_avg:44.95ms
+[2025-09-05 16:29:21] [Rank 0] step:4381/10000 train_time:196768ms step_avg:44.91ms
+[2025-09-05 16:29:22] [Rank 0] step:4401/10000 train_time:197504ms step_avg:44.88ms
+[2025-09-05 16:29:22] [Rank 0] step:4421/10000 train_time:198239ms step_avg:44.84ms
+[2025-09-05 16:29:23] [Rank 0] step:4441/10000 train_time:198975ms step_avg:44.80ms
+[2025-09-05 16:29:24] [Rank 0] step:4461/10000 train_time:199712ms step_avg:44.77ms
+[2025-09-05 16:29:25] [Rank 0] step:4481/10000 train_time:200448ms step_avg:44.73ms
+[2025-09-05 16:29:25] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:29:26] [Rank 0] PRINT: step:4500/10000 train_loss:1.4202 val_loss:1.4059 train_time:201264ms step_avg:44.73ms
+[2025-09-05 16:29:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:29:26] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:30:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:30:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:30:48] [Rank 0] Total Loss: 4.1804
+[2025-09-05 16:30:48] [Rank 0] Total FTA (Unweighted): 0.5162
+[2025-09-05 16:30:48] [Rank 0] Total FTA (Weighted): 0.5162
+[2025-09-05 16:30:48] [Rank 0] Group 0 Loss: 3.4668
+[2025-09-05 16:30:48] [Rank 0] Group 1 Loss: 3.2595
+[2025-09-05 16:30:48] [Rank 0] Group 2 Loss: 3.1791
+[2025-09-05 16:30:48] [Rank 0] Group 3 Loss: 3.5504
+[2025-09-05 16:30:48] [Rank 0] Group 4 Loss: 3.6840
+[2025-09-05 16:30:48] [Rank 0] Group 5 Loss: 3.9081
+[2025-09-05 16:30:48] [Rank 0] Group 6 Loss: 3.9379
+[2025-09-05 16:30:48] [Rank 0] Group 7 Loss: 4.1957
+[2025-09-05 16:30:48] [Rank 0] Group 8 Loss: 4.4439
+[2025-09-05 16:30:48] [Rank 0] Group 9 Loss: 4.5534
+[2025-09-05 16:30:48] [Rank 0] Group 10 Loss: 4.7253
+[2025-09-05 16:30:48] [Rank 0] Group 11 Loss: 4.7781
+[2025-09-05 16:30:48] [Rank 0] Group 12 Loss: 4.7297
+[2025-09-05 16:30:48] [Rank 0] Group 13 Loss: 4.7793
+[2025-09-05 16:30:48] [Rank 0] Group 14 Loss: 4.8454
+[2025-09-05 16:30:48] [Rank 0] Group 15 Loss: 4.8501
+[2025-09-05 16:30:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:30:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:30:48] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:30:48] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:30:48] [Rank 0] Group 4 FTA: 0.7800
+[2025-09-05 16:30:48] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 16:30:48] [Rank 0] Group 6 FTA: 0.4900
+[2025-09-05 16:30:48] [Rank 0] Group 7 FTA: 0.4400
+[2025-09-05 16:30:48] [Rank 0] Group 8 FTA: 0.4500
+[2025-09-05 16:30:48] [Rank 0] Group 9 FTA: 0.3600
+[2025-09-05 16:30:48] [Rank 0] Group 10 FTA: 0.4500
+[2025-09-05 16:30:48] [Rank 0] Group 11 FTA: 0.2800
+[2025-09-05 16:30:48] [Rank 0] Group 12 FTA: 0.1200
+[2025-09-05 16:30:48] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 16:30:48] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:30:48] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 16:30:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:30:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:30:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:30:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:30:49] [Rank 0] step:4501/10000 train_time:201274ms step_avg:44.72ms
+[2025-09-05 16:30:50] [Rank 0] step:4521/10000 train_time:201948ms step_avg:44.67ms
+[2025-09-05 16:30:51] [Rank 0] step:4541/10000 train_time:202684ms step_avg:44.63ms
+[2025-09-05 16:30:52] [Rank 0] step:4561/10000 train_time:203421ms step_avg:44.60ms
+[2025-09-05 16:30:52] [Rank 0] step:4581/10000 train_time:204157ms step_avg:44.57ms
+[2025-09-05 16:30:53] [Rank 0] step:4601/10000 train_time:204893ms step_avg:44.53ms
+[2025-09-05 16:30:54] [Rank 0] step:4621/10000 train_time:205752ms step_avg:44.53ms
+[2025-09-05 16:30:55] [Rank 0] step:4641/10000 train_time:206487ms step_avg:44.49ms
+[2025-09-05 16:30:55] [Rank 0] step:4661/10000 train_time:207224ms step_avg:44.46ms
+[2025-09-05 16:30:56] [Rank 0] step:4681/10000 train_time:208077ms step_avg:44.45ms
+[2025-09-05 16:30:57] [Rank 0] step:4701/10000 train_time:208813ms step_avg:44.42ms
+[2025-09-05 16:30:58] [Rank 0] step:4721/10000 train_time:209549ms step_avg:44.39ms
+[2025-09-05 16:30:58] [Rank 0] step:4741/10000 train_time:210285ms step_avg:44.35ms
+[2025-09-05 16:30:59] [Rank 0] step:4761/10000 train_time:211021ms step_avg:44.32ms
+[2025-09-05 16:31:00] [Rank 0] step:4781/10000 train_time:211756ms step_avg:44.29ms
+[2025-09-05 16:31:01] [Rank 0] step:4801/10000 train_time:212493ms step_avg:44.26ms
+[2025-09-05 16:31:01] [Rank 0] step:4821/10000 train_time:213228ms step_avg:44.23ms
+[2025-09-05 16:31:02] [Rank 0] step:4841/10000 train_time:214274ms step_avg:44.26ms
+[2025-09-05 16:31:03] [Rank 0] step:4861/10000 train_time:215010ms step_avg:44.23ms
+[2025-09-05 16:31:04] [Rank 0] step:4881/10000 train_time:215746ms step_avg:44.20ms
+[2025-09-05 16:31:05] [Rank 0] step:4901/10000 train_time:216483ms step_avg:44.17ms
+[2025-09-05 16:31:05] [Rank 0] step:4921/10000 train_time:217218ms step_avg:44.14ms
+[2025-09-05 16:31:06] [Rank 0] step:4941/10000 train_time:217955ms step_avg:44.11ms
+[2025-09-05 16:31:07] [Rank 0] step:4961/10000 train_time:218690ms step_avg:44.08ms
+[2025-09-05 16:31:08] [Rank 0] step:4981/10000 train_time:219426ms step_avg:44.05ms
+[2025-09-05 16:31:08] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:31:09] [Rank 0] PRINT: step:5000/10000 train_loss:1.4149 val_loss:1.4045 train_time:220243ms step_avg:44.05ms
+[2025-09-05 16:31:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:31:09] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:32:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:32:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:32:31] [Rank 0] Total Loss: 4.2121
+[2025-09-05 16:32:31] [Rank 0] Total FTA (Unweighted): 0.5275
+[2025-09-05 16:32:31] [Rank 0] Total FTA (Weighted): 0.5275
+[2025-09-05 16:32:31] [Rank 0] Group 0 Loss: 3.4314
+[2025-09-05 16:32:31] [Rank 0] Group 1 Loss: 3.2944
+[2025-09-05 16:32:31] [Rank 0] Group 2 Loss: 3.2867
+[2025-09-05 16:32:31] [Rank 0] Group 3 Loss: 3.5664
+[2025-09-05 16:32:31] [Rank 0] Group 4 Loss: 3.7150
+[2025-09-05 16:32:31] [Rank 0] Group 5 Loss: 3.9154
+[2025-09-05 16:32:31] [Rank 0] Group 6 Loss: 3.9780
+[2025-09-05 16:32:31] [Rank 0] Group 7 Loss: 4.1822
+[2025-09-05 16:32:31] [Rank 0] Group 8 Loss: 4.4742
+[2025-09-05 16:32:31] [Rank 0] Group 9 Loss: 4.5877
+[2025-09-05 16:32:31] [Rank 0] Group 10 Loss: 4.7844
+[2025-09-05 16:32:31] [Rank 0] Group 11 Loss: 4.8235
+[2025-09-05 16:32:31] [Rank 0] Group 12 Loss: 4.7552
+[2025-09-05 16:32:31] [Rank 0] Group 13 Loss: 4.8179
+[2025-09-05 16:32:31] [Rank 0] Group 14 Loss: 4.8910
+[2025-09-05 16:32:31] [Rank 0] Group 15 Loss: 4.8908
+[2025-09-05 16:32:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:32:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:32:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:32:31] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:32:31] [Rank 0] Group 4 FTA: 0.8500
+[2025-09-05 16:32:31] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 16:32:31] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 16:32:31] [Rank 0] Group 7 FTA: 0.4300
+[2025-09-05 16:32:31] [Rank 0] Group 8 FTA: 0.4700
+[2025-09-05 16:32:31] [Rank 0] Group 9 FTA: 0.3600
+[2025-09-05 16:32:31] [Rank 0] Group 10 FTA: 0.4600
+[2025-09-05 16:32:31] [Rank 0] Group 11 FTA: 0.3000
+[2025-09-05 16:32:31] [Rank 0] Group 12 FTA: 0.1000
+[2025-09-05 16:32:31] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 16:32:31] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 16:32:31] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:32:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:32:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:32:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:32:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:32:32] [Rank 0] step:5001/10000 train_time:220252ms step_avg:44.04ms
+[2025-09-05 16:32:33] [Rank 0] step:5021/10000 train_time:220924ms step_avg:44.00ms
+[2025-09-05 16:32:33] [Rank 0] step:5041/10000 train_time:221660ms step_avg:43.97ms
+[2025-09-05 16:32:34] [Rank 0] step:5061/10000 train_time:222396ms step_avg:43.94ms
+[2025-09-05 16:32:35] [Rank 0] step:5081/10000 train_time:223132ms step_avg:43.92ms
+[2025-09-05 16:32:36] [Rank 0] step:5101/10000 train_time:223868ms step_avg:43.89ms
+[2025-09-05 16:32:36] [Rank 0] step:5121/10000 train_time:224604ms step_avg:43.86ms
+[2025-09-05 16:32:37] [Rank 0] step:5141/10000 train_time:225340ms step_avg:43.83ms
+[2025-09-05 16:32:38] [Rank 0] step:5161/10000 train_time:226076ms step_avg:43.80ms
+[2025-09-05 16:32:39] [Rank 0] step:5181/10000 train_time:226813ms step_avg:43.78ms
+[2025-09-05 16:32:39] [Rank 0] step:5201/10000 train_time:227549ms step_avg:43.75ms
+[2025-09-05 16:32:40] [Rank 0] step:5221/10000 train_time:228284ms step_avg:43.72ms
+[2025-09-05 16:32:41] [Rank 0] step:5241/10000 train_time:229020ms step_avg:43.70ms
+[2025-09-05 16:32:42] [Rank 0] step:5261/10000 train_time:229756ms step_avg:43.67ms
+[2025-09-05 16:32:42] [Rank 0] step:5281/10000 train_time:230492ms step_avg:43.65ms
+[2025-09-05 16:32:43] [Rank 0] step:5301/10000 train_time:231228ms step_avg:43.62ms
+[2025-09-05 16:32:44] [Rank 0] step:5321/10000 train_time:231964ms step_avg:43.59ms
+[2025-09-05 16:32:45] [Rank 0] step:5341/10000 train_time:232701ms step_avg:43.57ms
+[2025-09-05 16:32:45] [Rank 0] step:5361/10000 train_time:233436ms step_avg:43.54ms
+[2025-09-05 16:32:46] [Rank 0] step:5381/10000 train_time:234173ms step_avg:43.52ms
+[2025-09-05 16:32:47] [Rank 0] step:5401/10000 train_time:234908ms step_avg:43.49ms
+[2025-09-05 16:32:47] [Rank 0] step:5421/10000 train_time:235644ms step_avg:43.47ms
+[2025-09-05 16:32:48] [Rank 0] step:5441/10000 train_time:236380ms step_avg:43.44ms
+[2025-09-05 16:32:49] [Rank 0] step:5461/10000 train_time:237116ms step_avg:43.42ms
+[2025-09-05 16:32:50] [Rank 0] step:5481/10000 train_time:237852ms step_avg:43.40ms
+[2025-09-05 16:32:50] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:32:51] [Rank 0] PRINT: step:5500/10000 train_loss:1.4109 val_loss:1.3999 train_time:238669ms step_avg:43.39ms
+[2025-09-05 16:32:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:32:51] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:34:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:34:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:34:12] [Rank 0] Total Loss: 4.1434
+[2025-09-05 16:34:12] [Rank 0] Total FTA (Unweighted): 0.5369
+[2025-09-05 16:34:12] [Rank 0] Total FTA (Weighted): 0.5369
+[2025-09-05 16:34:12] [Rank 0] Group 0 Loss: 3.3922
+[2025-09-05 16:34:12] [Rank 0] Group 1 Loss: 3.2694
+[2025-09-05 16:34:12] [Rank 0] Group 2 Loss: 3.1358
+[2025-09-05 16:34:12] [Rank 0] Group 3 Loss: 3.5654
+[2025-09-05 16:34:12] [Rank 0] Group 4 Loss: 3.6631
+[2025-09-05 16:34:12] [Rank 0] Group 5 Loss: 3.8806
+[2025-09-05 16:34:12] [Rank 0] Group 6 Loss: 3.9349
+[2025-09-05 16:34:12] [Rank 0] Group 7 Loss: 4.1312
+[2025-09-05 16:34:12] [Rank 0] Group 8 Loss: 4.3788
+[2025-09-05 16:34:12] [Rank 0] Group 9 Loss: 4.5126
+[2025-09-05 16:34:12] [Rank 0] Group 10 Loss: 4.7030
+[2025-09-05 16:34:12] [Rank 0] Group 11 Loss: 4.7142
+[2025-09-05 16:34:12] [Rank 0] Group 12 Loss: 4.6770
+[2025-09-05 16:34:12] [Rank 0] Group 13 Loss: 4.7628
+[2025-09-05 16:34:12] [Rank 0] Group 14 Loss: 4.7645
+[2025-09-05 16:34:12] [Rank 0] Group 15 Loss: 4.8082
+[2025-09-05 16:34:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:34:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:34:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:34:12] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:34:12] [Rank 0] Group 4 FTA: 0.8300
+[2025-09-05 16:34:12] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 16:34:12] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 16:34:12] [Rank 0] Group 7 FTA: 0.4500
+[2025-09-05 16:34:12] [Rank 0] Group 8 FTA: 0.4700
+[2025-09-05 16:34:12] [Rank 0] Group 9 FTA: 0.4000
+[2025-09-05 16:34:12] [Rank 0] Group 10 FTA: 0.4800
+[2025-09-05 16:34:12] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 16:34:12] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 16:34:12] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 16:34:12] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:34:12] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 16:34:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:34:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:34:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:34:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:34:14] [Rank 0] step:5501/10000 train_time:238678ms step_avg:43.39ms
+[2025-09-05 16:34:14] [Rank 0] step:5521/10000 train_time:239359ms step_avg:43.35ms
+[2025-09-05 16:34:15] [Rank 0] step:5541/10000 train_time:240094ms step_avg:43.33ms
+[2025-09-05 16:34:16] [Rank 0] step:5561/10000 train_time:240855ms step_avg:43.31ms
+[2025-09-05 16:34:17] [Rank 0] step:5581/10000 train_time:241590ms step_avg:43.29ms
+[2025-09-05 16:34:17] [Rank 0] step:5601/10000 train_time:242327ms step_avg:43.26ms
+[2025-09-05 16:34:18] [Rank 0] step:5621/10000 train_time:243062ms step_avg:43.24ms
+[2025-09-05 16:34:19] [Rank 0] step:5641/10000 train_time:244424ms step_avg:43.33ms
+[2025-09-05 16:34:20] [Rank 0] step:5661/10000 train_time:245161ms step_avg:43.31ms
+[2025-09-05 16:34:21] [Rank 0] step:5681/10000 train_time:245897ms step_avg:43.28ms
+[2025-09-05 16:34:22] [Rank 0] step:5701/10000 train_time:246634ms step_avg:43.26ms
+[2025-09-05 16:34:22] [Rank 0] step:5721/10000 train_time:247370ms step_avg:43.24ms
+[2025-09-05 16:34:23] [Rank 0] step:5741/10000 train_time:248106ms step_avg:43.22ms
+[2025-09-05 16:34:24] [Rank 0] step:5761/10000 train_time:248842ms step_avg:43.19ms
+[2025-09-05 16:34:25] [Rank 0] step:5781/10000 train_time:249580ms step_avg:43.17ms
+[2025-09-05 16:34:25] [Rank 0] step:5801/10000 train_time:250316ms step_avg:43.15ms
+[2025-09-05 16:34:26] [Rank 0] step:5821/10000 train_time:251053ms step_avg:43.13ms
+[2025-09-05 16:34:27] [Rank 0] step:5841/10000 train_time:251789ms step_avg:43.11ms
+[2025-09-05 16:34:27] [Rank 0] step:5861/10000 train_time:252524ms step_avg:43.09ms
+[2025-09-05 16:34:28] [Rank 0] step:5881/10000 train_time:253260ms step_avg:43.06ms
+[2025-09-05 16:34:29] [Rank 0] step:5901/10000 train_time:253996ms step_avg:43.04ms
+[2025-09-05 16:34:30] [Rank 0] step:5921/10000 train_time:254732ms step_avg:43.02ms
+[2025-09-05 16:34:30] [Rank 0] step:5941/10000 train_time:255468ms step_avg:43.00ms
+[2025-09-05 16:34:31] [Rank 0] step:5961/10000 train_time:256204ms step_avg:42.98ms
+[2025-09-05 16:34:32] [Rank 0] step:5981/10000 train_time:256940ms step_avg:42.96ms
+[2025-09-05 16:34:33] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:34:33] [Rank 0] PRINT: step:6000/10000 train_loss:1.4077 val_loss:1.3987 train_time:257756ms step_avg:42.96ms
+[2025-09-05 16:34:33] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:34:33] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:35:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:35:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:35:54] [Rank 0] Total Loss: 4.2915
+[2025-09-05 16:35:54] [Rank 0] Total FTA (Unweighted): 0.5450
+[2025-09-05 16:35:54] [Rank 0] Total FTA (Weighted): 0.5450
+[2025-09-05 16:35:54] [Rank 0] Group 0 Loss: 3.4987
+[2025-09-05 16:35:55] [Rank 0] Group 1 Loss: 3.2925
+[2025-09-05 16:35:55] [Rank 0] Group 2 Loss: 3.2930
+[2025-09-05 16:35:55] [Rank 0] Group 3 Loss: 3.6834
+[2025-09-05 16:35:55] [Rank 0] Group 4 Loss: 3.8803
+[2025-09-05 16:35:55] [Rank 0] Group 5 Loss: 4.0408
+[2025-09-05 16:35:55] [Rank 0] Group 6 Loss: 4.0547
+[2025-09-05 16:35:55] [Rank 0] Group 7 Loss: 4.2905
+[2025-09-05 16:35:55] [Rank 0] Group 8 Loss: 4.5713
+[2025-09-05 16:35:55] [Rank 0] Group 9 Loss: 4.6643
+[2025-09-05 16:35:55] [Rank 0] Group 10 Loss: 4.9166
+[2025-09-05 16:35:55] [Rank 0] Group 11 Loss: 4.9128
+[2025-09-05 16:35:55] [Rank 0] Group 12 Loss: 4.8312
+[2025-09-05 16:35:55] [Rank 0] Group 13 Loss: 4.8547
+[2025-09-05 16:35:55] [Rank 0] Group 14 Loss: 4.9310
+[2025-09-05 16:35:55] [Rank 0] Group 15 Loss: 4.9486
+[2025-09-05 16:35:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:35:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:35:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:35:55] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:35:55] [Rank 0] Group 4 FTA: 0.8500
+[2025-09-05 16:35:55] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 16:35:55] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 16:35:55] [Rank 0] Group 7 FTA: 0.4400
+[2025-09-05 16:35:55] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 16:35:55] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 16:35:55] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 16:35:55] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 16:35:55] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 16:35:55] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 16:35:55] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 16:35:55] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:35:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:35:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:35:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:35:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:35:56] [Rank 0] step:6001/10000 train_time:257765ms step_avg:42.95ms
+[2025-09-05 16:35:57] [Rank 0] step:6021/10000 train_time:259033ms step_avg:43.02ms
+[2025-09-05 16:35:58] [Rank 0] step:6041/10000 train_time:259769ms step_avg:43.00ms
+[2025-09-05 16:35:59] [Rank 0] step:6061/10000 train_time:260505ms step_avg:42.98ms
+[2025-09-05 16:36:00] [Rank 0] step:6081/10000 train_time:261241ms step_avg:42.96ms
+[2025-09-05 16:36:00] [Rank 0] step:6101/10000 train_time:261976ms step_avg:42.94ms
+[2025-09-05 16:36:01] [Rank 0] step:6121/10000 train_time:262713ms step_avg:42.92ms
+[2025-09-05 16:36:02] [Rank 0] step:6141/10000 train_time:263449ms step_avg:42.90ms
+[2025-09-05 16:36:02] [Rank 0] step:6161/10000 train_time:264185ms step_avg:42.88ms
+[2025-09-05 16:36:03] [Rank 0] step:6181/10000 train_time:264921ms step_avg:42.86ms
+[2025-09-05 16:36:04] [Rank 0] step:6201/10000 train_time:265657ms step_avg:42.84ms
+[2025-09-05 16:36:05] [Rank 0] step:6221/10000 train_time:266392ms step_avg:42.82ms
+[2025-09-05 16:36:05] [Rank 0] step:6241/10000 train_time:267127ms step_avg:42.80ms
+[2025-09-05 16:36:06] [Rank 0] step:6261/10000 train_time:267864ms step_avg:42.78ms
+[2025-09-05 16:36:07] [Rank 0] step:6281/10000 train_time:268599ms step_avg:42.76ms
+[2025-09-05 16:36:08] [Rank 0] step:6301/10000 train_time:269335ms step_avg:42.74ms
+[2025-09-05 16:36:08] [Rank 0] step:6321/10000 train_time:270070ms step_avg:42.73ms
+[2025-09-05 16:36:09] [Rank 0] step:6341/10000 train_time:270806ms step_avg:42.71ms
+[2025-09-05 16:36:10] [Rank 0] step:6361/10000 train_time:271668ms step_avg:42.71ms
+[2025-09-05 16:36:11] [Rank 0] step:6381/10000 train_time:272404ms step_avg:42.69ms
+[2025-09-05 16:36:11] [Rank 0] step:6401/10000 train_time:273139ms step_avg:42.67ms
+[2025-09-05 16:36:12] [Rank 0] step:6421/10000 train_time:274012ms step_avg:42.67ms
+[2025-09-05 16:36:13] [Rank 0] step:6441/10000 train_time:274747ms step_avg:42.66ms
+[2025-09-05 16:36:14] [Rank 0] step:6461/10000 train_time:275483ms step_avg:42.64ms
+[2025-09-05 16:36:15] [Rank 0] step:6481/10000 train_time:276218ms step_avg:42.62ms
+[2025-09-05 16:36:15] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:36:16] [Rank 0] PRINT: step:6500/10000 train_loss:1.4060 val_loss:1.3951 train_time:277035ms step_avg:42.62ms
+[2025-09-05 16:36:16] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:36:16] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:37:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:37:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:37:38] [Rank 0] Total Loss: 4.1912
+[2025-09-05 16:37:38] [Rank 0] Total FTA (Unweighted): 0.5437
+[2025-09-05 16:37:38] [Rank 0] Total FTA (Weighted): 0.5437
+[2025-09-05 16:37:38] [Rank 0] Group 0 Loss: 3.4425
+[2025-09-05 16:37:38] [Rank 0] Group 1 Loss: 3.2285
+[2025-09-05 16:37:38] [Rank 0] Group 2 Loss: 3.1997
+[2025-09-05 16:37:38] [Rank 0] Group 3 Loss: 3.6221
+[2025-09-05 16:37:38] [Rank 0] Group 4 Loss: 3.7798
+[2025-09-05 16:37:38] [Rank 0] Group 5 Loss: 3.9174
+[2025-09-05 16:37:38] [Rank 0] Group 6 Loss: 3.9539
+[2025-09-05 16:37:38] [Rank 0] Group 7 Loss: 4.1685
+[2025-09-05 16:37:38] [Rank 0] Group 8 Loss: 4.4814
+[2025-09-05 16:37:38] [Rank 0] Group 9 Loss: 4.5761
+[2025-09-05 16:37:38] [Rank 0] Group 10 Loss: 4.7668
+[2025-09-05 16:37:38] [Rank 0] Group 11 Loss: 4.7789
+[2025-09-05 16:37:38] [Rank 0] Group 12 Loss: 4.6991
+[2025-09-05 16:37:38] [Rank 0] Group 13 Loss: 4.7797
+[2025-09-05 16:37:38] [Rank 0] Group 14 Loss: 4.8204
+[2025-09-05 16:37:38] [Rank 0] Group 15 Loss: 4.8439
+[2025-09-05 16:37:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:37:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:37:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:37:38] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:37:38] [Rank 0] Group 4 FTA: 0.8700
+[2025-09-05 16:37:38] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 16:37:38] [Rank 0] Group 6 FTA: 0.5300
+[2025-09-05 16:37:38] [Rank 0] Group 7 FTA: 0.4800
+[2025-09-05 16:37:38] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 16:37:38] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 16:37:38] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 16:37:38] [Rank 0] Group 11 FTA: 0.3100
+[2025-09-05 16:37:38] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 16:37:38] [Rank 0] Group 13 FTA: 0.1300
+[2025-09-05 16:37:38] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 16:37:38] [Rank 0] Group 15 FTA: 0.0400
+[2025-09-05 16:37:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:37:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:37:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:37:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:37:39] [Rank 0] step:6501/10000 train_time:277044ms step_avg:42.62ms
+[2025-09-05 16:37:40] [Rank 0] step:6521/10000 train_time:277706ms step_avg:42.59ms
+[2025-09-05 16:37:41] [Rank 0] step:6541/10000 train_time:278442ms step_avg:42.57ms
+[2025-09-05 16:37:42] [Rank 0] step:6561/10000 train_time:279178ms step_avg:42.55ms
+[2025-09-05 16:37:42] [Rank 0] step:6581/10000 train_time:279915ms step_avg:42.53ms
+[2025-09-05 16:37:43] [Rank 0] step:6601/10000 train_time:280651ms step_avg:42.52ms
+[2025-09-05 16:37:44] [Rank 0] step:6621/10000 train_time:281387ms step_avg:42.50ms
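A consistency check on the detailed-evaluation block above: Total FTA (Unweighted) is exactly the mean of the 16 per-group FTA values, and the weighted figure coincides with it, which is what you would expect if the 1600 fixed-eval samples are split evenly at 100 per group. A quick verification with the step-6500 numbers (illustrative Python):

    group_fta = [1.00, 1.00, 1.00, 1.00, 0.87, 0.58, 0.53, 0.48,
                 0.49, 0.42, 0.49, 0.31, 0.20, 0.13, 0.16, 0.04]
    print(sum(group_fta) / len(group_fta))  # 0.54375 -> logged as 0.5437
    # With equal group sizes (1600 / 16 = 100), the sample-weighted mean
    # reduces to the unweighted mean, hence identical totals in the log.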
+[2025-09-05 16:37:44] [Rank 0] step:6641/10000 train_time:282122ms step_avg:42.48ms
+[2025-09-05 16:37:45] [Rank 0] step:6661/10000 train_time:282858ms step_avg:42.46ms
+[2025-09-05 16:37:46] [Rank 0] step:6681/10000 train_time:283594ms step_avg:42.45ms
+[2025-09-05 16:37:47] [Rank 0] step:6701/10000 train_time:284330ms step_avg:42.43ms
+[2025-09-05 16:37:47] [Rank 0] step:6721/10000 train_time:285066ms step_avg:42.41ms
+[2025-09-05 16:37:48] [Rank 0] step:6741/10000 train_time:285802ms step_avg:42.40ms
+[2025-09-05 16:37:49] [Rank 0] step:6761/10000 train_time:286538ms step_avg:42.38ms
+[2025-09-05 16:37:50] [Rank 0] step:6781/10000 train_time:287273ms step_avg:42.36ms
+[2025-09-05 16:37:50] [Rank 0] step:6801/10000 train_time:288009ms step_avg:42.35ms
+[2025-09-05 16:37:51] [Rank 0] step:6821/10000 train_time:288745ms step_avg:42.33ms
+[2025-09-05 16:37:52] [Rank 0] step:6841/10000 train_time:290106ms step_avg:42.41ms
+[2025-09-05 16:37:53] [Rank 0] step:6861/10000 train_time:290842ms step_avg:42.39ms
+[2025-09-05 16:37:54] [Rank 0] step:6881/10000 train_time:291578ms step_avg:42.37ms
+[2025-09-05 16:37:55] [Rank 0] step:6901/10000 train_time:292314ms step_avg:42.36ms
+[2025-09-05 16:37:55] [Rank 0] step:6921/10000 train_time:293050ms step_avg:42.34ms
+[2025-09-05 16:37:56] [Rank 0] step:6941/10000 train_time:293785ms step_avg:42.33ms
+[2025-09-05 16:37:57] [Rank 0] step:6961/10000 train_time:294521ms step_avg:42.31ms
+[2025-09-05 16:37:58] [Rank 0] step:6981/10000 train_time:295257ms step_avg:42.29ms
+[2025-09-05 16:37:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:37:59] [Rank 0] PRINT: step:7000/10000 train_loss:1.4026 val_loss:1.3933 train_time:296073ms step_avg:42.30ms
+[2025-09-05 16:37:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:37:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:39:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:39:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:39:20] [Rank 0] Total Loss: 4.1752
+[2025-09-05 16:39:20] [Rank 0] Total FTA (Unweighted): 0.5563
+[2025-09-05 16:39:20] [Rank 0] Total FTA (Weighted): 0.5563
+[2025-09-05 16:39:20] [Rank 0] Group 0 Loss: 3.4171
+[2025-09-05 16:39:20] [Rank 0] Group 1 Loss: 3.2860
+[2025-09-05 16:39:20] [Rank 0] Group 2 Loss: 3.2211
+[2025-09-05 16:39:20] [Rank 0] Group 3 Loss: 3.6187
+[2025-09-05 16:39:20] [Rank 0] Group 4 Loss: 3.7052
+[2025-09-05 16:39:20] [Rank 0] Group 5 Loss: 3.9171
+[2025-09-05 16:39:20] [Rank 0] Group 6 Loss: 3.9504
+[2025-09-05 16:39:20] [Rank 0] Group 7 Loss: 4.1550
+[2025-09-05 16:39:20] [Rank 0] Group 8 Loss: 4.4240
+[2025-09-05 16:39:20] [Rank 0] Group 9 Loss: 4.5535
+[2025-09-05 16:39:20] [Rank 0] Group 10 Loss: 4.7256
+[2025-09-05 16:39:20] [Rank 0] Group 11 Loss: 4.7380
+[2025-09-05 16:39:20] [Rank 0] Group 12 Loss: 4.7005
+[2025-09-05 16:39:20] [Rank 0] Group 13 Loss: 4.7960
+[2025-09-05 16:39:20] [Rank 0] Group 14 Loss: 4.7904
+[2025-09-05 16:39:20] [Rank 0] Group 15 Loss: 4.8054
+[2025-09-05 16:39:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:39:20] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:39:20] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:39:20] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:39:20] [Rank 0] Group 4 FTA: 0.8800
+[2025-09-05 16:39:20] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 16:39:20] [Rank 0] Group 6 FTA: 0.5300
+[2025-09-05 16:39:20] [Rank 0] Group 7 FTA: 0.5000
+[2025-09-05 16:39:20] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 16:39:20] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 16:39:20] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 16:39:20] [Rank 0] Group 11 FTA: 0.3400
+[2025-09-05 16:39:20] [Rank 0] Group 12 FTA: 0.2700
+[2025-09-05 16:39:20] [Rank 0] Group 13 FTA: 0.2000
+[2025-09-05 16:39:20] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:39:20] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 16:39:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:39:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:39:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:39:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:39:22] [Rank 0] step:7001/10000 train_time:296082ms step_avg:42.29ms
+[2025-09-05 16:39:23] [Rank 0] step:7021/10000 train_time:296851ms step_avg:42.28ms
+[2025-09-05 16:39:23] [Rank 0] step:7041/10000 train_time:297587ms step_avg:42.26ms
+[2025-09-05 16:39:24] [Rank 0] step:7061/10000 train_time:298323ms step_avg:42.25ms
+[2025-09-05 16:39:25] [Rank 0] step:7081/10000 train_time:299060ms step_avg:42.23ms
+[2025-09-05 16:39:26] [Rank 0] step:7101/10000 train_time:299795ms step_avg:42.22ms
+[2025-09-05 16:39:26] [Rank 0] step:7121/10000 train_time:300531ms step_avg:42.20ms
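The two timing figures on each step line are related but distinct: step_avg appears to be the cumulative train_time divided by the step index, while the difference between consecutive entries gives the marginal cost of the last 20 steps. Checked against the two most recent entries above (illustrative Python):

    # step:7101 -> train_time:299795ms; step:7121 -> train_time:300531ms
    print(300531 / 7121)           # ~42.20 -> the logged step_avg (cumulative average)
    print((300531 - 299795) / 20)  # 36.8 ms/step marginal cost, below the running average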
+[2025-09-05 16:39:27] [Rank 0] step:7141/10000 train_time:301266ms step_avg:42.19ms
+[2025-09-05 16:39:28] [Rank 0] step:7161/10000 train_time:302002ms step_avg:42.17ms
+[2025-09-05 16:39:29] [Rank 0] step:7181/10000 train_time:302738ms step_avg:42.16ms
+[2025-09-05 16:39:29] [Rank 0] step:7201/10000 train_time:303473ms step_avg:42.14ms
+[2025-09-05 16:39:30] [Rank 0] step:7221/10000 train_time:304209ms step_avg:42.13ms
+[2025-09-05 16:39:31] [Rank 0] step:7241/10000 train_time:304945ms step_avg:42.11ms
+[2025-09-05 16:39:32] [Rank 0] step:7261/10000 train_time:305681ms step_avg:42.10ms
+[2025-09-05 16:39:32] [Rank 0] step:7281/10000 train_time:306417ms step_avg:42.08ms
+[2025-09-05 16:39:33] [Rank 0] step:7301/10000 train_time:307153ms step_avg:42.07ms
+[2025-09-05 16:39:34] [Rank 0] step:7321/10000 train_time:307889ms step_avg:42.06ms
+[2025-09-05 16:39:34] [Rank 0] step:7341/10000 train_time:308624ms step_avg:42.04ms
+[2025-09-05 16:39:35] [Rank 0] step:7361/10000 train_time:309360ms step_avg:42.03ms
+[2025-09-05 16:39:36] [Rank 0] step:7381/10000 train_time:310096ms step_avg:42.01ms
+[2025-09-05 16:39:37] [Rank 0] step:7401/10000 train_time:310832ms step_avg:42.00ms
+[2025-09-05 16:39:37] [Rank 0] step:7421/10000 train_time:311567ms step_avg:41.98ms
+[2025-09-05 16:39:38] [Rank 0] step:7441/10000 train_time:312302ms step_avg:41.97ms
+[2025-09-05 16:39:39] [Rank 0] step:7461/10000 train_time:313037ms step_avg:41.96ms
+[2025-09-05 16:39:40] [Rank 0] step:7481/10000 train_time:313773ms step_avg:41.94ms
+[2025-09-05 16:39:40] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:39:41] [Rank 0] PRINT: step:7500/10000 train_loss:1.4005 val_loss:1.3922 train_time:314589ms step_avg:41.95ms
+[2025-09-05 16:39:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:39:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:41:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:41:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:41:02] [Rank 0] Total Loss: 4.1931
+[2025-09-05 16:41:02] [Rank 0] Total FTA (Unweighted): 0.5650
+[2025-09-05 16:41:02] [Rank 0] Total FTA (Weighted): 0.5650
+[2025-09-05 16:41:02] [Rank 0] Group 0 Loss: 3.4094
+[2025-09-05 16:41:02] [Rank 0] Group 1 Loss: 3.2934
+[2025-09-05 16:41:02] [Rank 0] Group 2 Loss: 3.2595
+[2025-09-05 16:41:02] [Rank 0] Group 3 Loss: 3.6231
+[2025-09-05 16:41:02] [Rank 0] Group 4 Loss: 3.7449
+[2025-09-05 16:41:02] [Rank 0] Group 5 Loss: 3.9468
+[2025-09-05 16:41:02] [Rank 0] Group 6 Loss: 3.9943
+[2025-09-05 16:41:02] [Rank 0] Group 7 Loss: 4.1949
+[2025-09-05 16:41:02] [Rank 0] Group 8 Loss: 4.4423
+[2025-09-05 16:41:02] [Rank 0] Group 9 Loss: 4.5538
+[2025-09-05 16:41:02] [Rank 0] Group 10 Loss: 4.7585
+[2025-09-05 16:41:02] [Rank 0] Group 11 Loss: 4.7660
+[2025-09-05 16:41:02] [Rank 0] Group 12 Loss: 4.6986
+[2025-09-05 16:41:02] [Rank 0] Group 13 Loss: 4.7760
+[2025-09-05 16:41:02] [Rank 0] Group 14 Loss: 4.7967
+[2025-09-05 16:41:02] [Rank 0] Group 15 Loss: 4.8312
+[2025-09-05 16:41:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:41:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:41:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:41:02] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:41:02] [Rank 0] Group 4 FTA: 0.9400
+[2025-09-05 16:41:02] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 16:41:02] [Rank 0] Group 6 FTA: 0.5300
+[2025-09-05 16:41:02] [Rank 0] Group 7 FTA: 0.4700
+[2025-09-05 16:41:02] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 16:41:02] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 16:41:02] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 16:41:02] [Rank 0] Group 11 FTA: 0.3600
+[2025-09-05 16:41:02] [Rank 0] Group 12 FTA: 0.3400
+[2025-09-05 16:41:02] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 16:41:02] [Rank 0] Group 14 FTA: 0.1300
+[2025-09-05 16:41:02] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:41:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:41:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:41:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:41:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:41:04] [Rank 0] step:7501/10000 train_time:314599ms step_avg:41.94ms
+[2025-09-05 16:41:05] [Rank 0] step:7521/10000 train_time:315270ms step_avg:41.92ms
+[2025-09-05 16:41:05] [Rank 0] step:7541/10000 train_time:316007ms step_avg:41.91ms
+[2025-09-05 16:41:06] [Rank 0] step:7561/10000 train_time:316743ms step_avg:41.89ms
+[2025-09-05 16:41:07] [Rank 0] step:7581/10000 train_time:317478ms step_avg:41.88ms
+[2025-09-05 16:41:07] [Rank 0] step:7601/10000 train_time:318214ms step_avg:41.86ms
+[2025-09-05 16:41:08] [Rank 0] step:7621/10000 train_time:318950ms step_avg:41.85ms
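The PRINT summary lines are regular enough to scrape when comparing runs; a minimal parsing sketch (hypothetical helper, not part of the logged script):

    import re

    # Matches e.g. "PRINT: step:7500/10000 train_loss:1.4005 val_loss:1.3922"
    _PAT = re.compile(r"PRINT: step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

    def scrape_losses(path):
        """Yield (step, train_loss, val_loss) tuples from a training log."""
        with open(path) as f:
            for line in f:
                m = _PAT.search(line)
                if m:
                    yield int(m.group(1)), float(m.group(2)), float(m.group(3))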
+[2025-09-05 16:41:10] [Rank 0] step:7641/10000 train_time:319909ms step_avg:41.87ms
+[2025-09-05 16:41:10] [Rank 0] step:7661/10000 train_time:321035ms step_avg:41.91ms
+[2025-09-05 16:41:11] [Rank 0] step:7681/10000 train_time:321771ms step_avg:41.89ms
+[2025-09-05 16:41:12] [Rank 0] step:7701/10000 train_time:322507ms step_avg:41.88ms
+[2025-09-05 16:41:13] [Rank 0] step:7721/10000 train_time:323244ms step_avg:41.87ms
+[2025-09-05 16:41:13] [Rank 0] step:7741/10000 train_time:323979ms step_avg:41.85ms
+[2025-09-05 16:41:14] [Rank 0] step:7761/10000 train_time:324715ms step_avg:41.84ms
+[2025-09-05 16:41:15] [Rank 0] step:7781/10000 train_time:325452ms step_avg:41.83ms
+[2025-09-05 16:41:15] [Rank 0] step:7801/10000 train_time:326187ms step_avg:41.81ms
+[2025-09-05 16:41:16] [Rank 0] step:7821/10000 train_time:326923ms step_avg:41.80ms
+[2025-09-05 16:41:17] [Rank 0] step:7841/10000 train_time:327658ms step_avg:41.79ms
+[2025-09-05 16:41:18] [Rank 0] step:7861/10000 train_time:328394ms step_avg:41.78ms
+[2025-09-05 16:41:18] [Rank 0] step:7881/10000 train_time:329131ms step_avg:41.76ms
+[2025-09-05 16:41:19] [Rank 0] step:7901/10000 train_time:329867ms step_avg:41.75ms
+[2025-09-05 16:41:20] [Rank 0] step:7921/10000 train_time:330603ms step_avg:41.74ms
+[2025-09-05 16:41:21] [Rank 0] step:7941/10000 train_time:331339ms step_avg:41.73ms
+[2025-09-05 16:41:21] [Rank 0] step:7961/10000 train_time:332075ms step_avg:41.71ms
+[2025-09-05 16:41:22] [Rank 0] step:7981/10000 train_time:332811ms step_avg:41.70ms
+[2025-09-05 16:41:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:41:23] [Rank 0] PRINT: step:8000/10000 train_loss:1.3995 val_loss:1.3894 train_time:333627ms step_avg:41.70ms
+[2025-09-05 16:41:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:41:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:42:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:42:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:42:45] [Rank 0] Total Loss: 4.1285
+[2025-09-05 16:42:45] [Rank 0] Total FTA (Unweighted): 0.5694
+[2025-09-05 16:42:45] [Rank 0] Total FTA (Weighted): 0.5694
+[2025-09-05 16:42:45] [Rank 0] Group 0 Loss: 3.3937
+[2025-09-05 16:42:45] [Rank 0] Group 1 Loss: 3.3015
+[2025-09-05 16:42:45] [Rank 0] Group 2 Loss: 3.2203
+[2025-09-05 16:42:45] [Rank 0] Group 3 Loss: 3.5370
+[2025-09-05 16:42:45] [Rank 0] Group 4 Loss: 3.6740
+[2025-09-05 16:42:45] [Rank 0] Group 5 Loss: 3.8598
+[2025-09-05 16:42:45] [Rank 0] Group 6 Loss: 3.9172
+[2025-09-05 16:42:45] [Rank 0] Group 7 Loss: 4.1061
+[2025-09-05 16:42:45] [Rank 0] Group 8 Loss: 4.3659
+[2025-09-05 16:42:45] [Rank 0] Group 9 Loss: 4.4820
+[2025-09-05 16:42:45] [Rank 0] Group 10 Loss: 4.6641
+[2025-09-05 16:42:45] [Rank 0] Group 11 Loss: 4.6641
+[2025-09-05 16:42:45] [Rank 0] Group 12 Loss: 4.6717
+[2025-09-05 16:42:45] [Rank 0] Group 13 Loss: 4.7469
+[2025-09-05 16:42:45] [Rank 0] Group 14 Loss: 4.7148
+[2025-09-05 16:42:45] [Rank 0] Group 15 Loss: 4.7361
+[2025-09-05 16:42:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:42:45] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:42:45] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:42:45] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:42:45] [Rank 0] Group 4 FTA: 0.9200
+[2025-09-05 16:42:45] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 16:42:45] [Rank 0] Group 6 FTA: 0.5300
+[2025-09-05 16:42:45] [Rank 0] Group 7 FTA: 0.4900
+[2025-09-05 16:42:45] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 16:42:45] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 16:42:45] [Rank 0] Group 10 FTA: 0.5100
+[2025-09-05 16:42:45] [Rank 0] Group 11 FTA: 0.3700
+[2025-09-05 16:42:45] [Rank 0] Group 12 FTA: 0.3200
+[2025-09-05 16:42:45] [Rank 0] Group 13 FTA: 0.1900
+[2025-09-05 16:42:45] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 16:42:45] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:42:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:42:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:42:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:42:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:42:46] [Rank 0] step:8001/10000 train_time:333637ms step_avg:41.70ms
+[2025-09-05 16:42:47] [Rank 0] step:8021/10000 train_time:334920ms step_avg:41.76ms
+[2025-09-05 16:42:48] [Rank 0] step:8041/10000 train_time:335655ms step_avg:41.74ms
+[2025-09-05 16:42:49] [Rank 0] step:8061/10000 train_time:336391ms step_avg:41.73ms
+[2025-09-05 16:42:50] [Rank 0] step:8081/10000 train_time:337127ms step_avg:41.72ms
+[2025-09-05 16:42:50] [Rank 0] step:8101/10000 train_time:337862ms step_avg:41.71ms
+[2025-09-05 16:42:51] [Rank 0] step:8121/10000 train_time:338598ms step_avg:41.69ms
+[2025-09-05 16:42:52] [Rank 0] step:8141/10000 train_time:339334ms step_avg:41.68ms
+[2025-09-05 16:42:53] [Rank 0] step:8161/10000 train_time:340070ms step_avg:41.67ms
+[2025-09-05 16:42:53] [Rank 0] step:8181/10000 train_time:340806ms step_avg:41.66ms
+[2025-09-05 16:42:54] [Rank 0] step:8201/10000 train_time:341542ms step_avg:41.65ms
+[2025-09-05 16:42:55] [Rank 0] step:8221/10000 train_time:342278ms step_avg:41.63ms
+[2025-09-05 16:42:56] [Rank 0] step:8241/10000 train_time:343014ms step_avg:41.62ms
+[2025-09-05 16:42:56] [Rank 0] step:8261/10000 train_time:343750ms step_avg:41.61ms
+[2025-09-05 16:42:57] [Rank 0] step:8281/10000 train_time:344486ms step_avg:41.60ms
+[2025-09-05 16:42:58] [Rank 0] step:8301/10000 train_time:345222ms step_avg:41.59ms
+[2025-09-05 16:42:59] [Rank 0] step:8321/10000 train_time:345957ms step_avg:41.58ms
+[2025-09-05 16:42:59] [Rank 0] step:8341/10000 train_time:346693ms step_avg:41.56ms
+[2025-09-05 16:43:00] [Rank 0] step:8361/10000 train_time:347429ms step_avg:41.55ms
+[2025-09-05 16:43:01] [Rank 0] step:8381/10000 train_time:348165ms step_avg:41.54ms
+[2025-09-05 16:43:01] [Rank 0] step:8401/10000 train_time:348902ms step_avg:41.53ms
+[2025-09-05 16:43:02] [Rank 0] step:8421/10000 train_time:349638ms step_avg:41.52ms
+[2025-09-05 16:43:03] [Rank 0] step:8441/10000 train_time:350373ms step_avg:41.51ms
+[2025-09-05 16:43:04] [Rank 0] step:8461/10000 train_time:351109ms step_avg:41.50ms
+[2025-09-05 16:43:04] [Rank 0] step:8481/10000 train_time:351845ms step_avg:41.49ms
+[2025-09-05 16:43:05] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:43:06] [Rank 0] PRINT: step:8500/10000 train_loss:1.3968 val_loss:1.3879 train_time:352661ms step_avg:41.49ms
+[2025-09-05 16:43:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:43:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:44:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:44:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:44:27] [Rank 0] Total Loss: 4.1926
+[2025-09-05 16:44:27] [Rank 0] Total FTA (Unweighted): 0.5869
+[2025-09-05 16:44:27] [Rank 0] Total FTA (Weighted): 0.5869
+[2025-09-05 16:44:27] [Rank 0] Group 0 Loss: 3.3830
+[2025-09-05 16:44:27] [Rank 0] Group 1 Loss: 3.3231
+[2025-09-05 16:44:27] [Rank 0] Group 2 Loss: 3.3096
+[2025-09-05 16:44:27] [Rank 0] Group 3 Loss: 3.5974
+[2025-09-05 16:44:27] [Rank 0] Group 4 Loss: 3.7640
+[2025-09-05 16:44:27] [Rank 0] Group 5 Loss: 3.9299
+[2025-09-05 16:44:27] [Rank 0] Group 6 Loss: 3.9546
+[2025-09-05 16:44:27] [Rank 0] Group 7 Loss: 4.1811
+[2025-09-05 16:44:27] [Rank 0] Group 8 Loss: 4.4662
+[2025-09-05 16:44:27] [Rank 0] Group 9 Loss: 4.5969
+[2025-09-05 16:44:27] [Rank 0] Group 10 Loss: 4.7675
+[2025-09-05 16:44:27] [Rank 0] Group 11 Loss: 4.7212
+[2025-09-05 16:44:27] [Rank 0] Group 12 Loss: 4.7172
+[2025-09-05 16:44:27] [Rank 0] Group 13 Loss: 4.7509
+[2025-09-05 16:44:27] [Rank 0] Group 14 Loss: 4.8177
+[2025-09-05 16:44:27] [Rank 0] Group 15 Loss: 4.8009
+[2025-09-05 16:44:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:44:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:44:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:44:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:44:27] [Rank 0] Group 4 FTA: 0.9800
+[2025-09-05 16:44:27] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 16:44:27] [Rank 0] Group 6 FTA: 0.5400
+[2025-09-05 16:44:27] [Rank 0] Group 7 FTA: 0.5400
+[2025-09-05 16:44:27] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 16:44:27] [Rank 0] Group 9 FTA: 0.4400
+[2025-09-05 16:44:27] [Rank 0] Group 10 FTA: 0.5200
+[2025-09-05 16:44:27] [Rank 0] Group 11 FTA: 0.3600
+[2025-09-05 16:44:27] [Rank 0] Group 12 FTA: 0.3700
+[2025-09-05 16:44:27] [Rank 0] Group 13 FTA: 0.2500
+[2025-09-05 16:44:27] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 16:44:27] [Rank 0] Group 15 FTA: 0.1300
+[2025-09-05 16:44:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:44:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:44:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:44:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:44:28] [Rank 0] step:8501/10000 train_time:352671ms step_avg:41.49ms
+[2025-09-05 16:44:29] [Rank 0] step:8521/10000 train_time:353353ms step_avg:41.47ms
+[2025-09-05 16:44:30] [Rank 0] step:8541/10000 train_time:354089ms step_avg:41.46ms
+[2025-09-05 16:44:31] [Rank 0] step:8561/10000 train_time:354824ms step_avg:41.45ms
+[2025-09-05 16:44:31] [Rank 0] step:8581/10000 train_time:355560ms step_avg:41.44ms
+[2025-09-05 16:44:32] [Rank 0] step:8601/10000 train_time:356295ms step_avg:41.42ms
+[2025-09-05 16:44:33] [Rank 0] step:8621/10000 train_time:357031ms step_avg:41.41ms
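The timestamps bound the cost of each detailed evaluation pass: at step 8500 it starts at 16:43:06 and completes at 16:44:27, roughly 81 s, and a similar gap recurs at every 500-step checkpoint. A quick check (illustrative Python):

    from datetime import datetime

    start = datetime.fromisoformat("2025-09-05 16:43:06")
    done = datetime.fromisoformat("2025-09-05 16:44:27")
    print((done - start).total_seconds())  # 81.0 s per detailed evaluation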
[Rank 0] step:8621/10000 train_time:357031ms step_avg:41.41ms +[2025-09-05 16:44:34] [Rank 0] step:8641/10000 train_time:357767ms step_avg:41.40ms +[2025-09-05 16:44:34] [Rank 0] step:8641/10000 train_time:357767ms step_avg:41.40ms +[2025-09-05 16:44:34] [Rank 0] step:8661/10000 train_time:358504ms step_avg:41.39ms +[2025-09-05 16:44:34] [Rank 0] step:8661/10000 train_time:358504ms step_avg:41.39ms +[2025-09-05 16:44:35] [Rank 0] step:8681/10000 train_time:359240ms step_avg:41.38ms +[2025-09-05 16:44:35] [Rank 0] step:8681/10000 train_time:359240ms step_avg:41.38ms +[2025-09-05 16:44:36] [Rank 0] step:8701/10000 train_time:360094ms step_avg:41.39ms +[2025-09-05 16:44:36] [Rank 0] step:8701/10000 train_time:360094ms step_avg:41.39ms +[2025-09-05 16:44:37] [Rank 0] step:8721/10000 train_time:360829ms step_avg:41.37ms +[2025-09-05 16:44:37] [Rank 0] step:8721/10000 train_time:360829ms step_avg:41.37ms +[2025-09-05 16:44:37] [Rank 0] step:8741/10000 train_time:361566ms step_avg:41.36ms +[2025-09-05 16:44:37] [Rank 0] step:8741/10000 train_time:361566ms step_avg:41.36ms +[2025-09-05 16:44:38] [Rank 0] step:8761/10000 train_time:362440ms step_avg:41.37ms +[2025-09-05 16:44:38] [Rank 0] step:8761/10000 train_time:362440ms step_avg:41.37ms +[2025-09-05 16:44:39] [Rank 0] step:8781/10000 train_time:363176ms step_avg:41.36ms +[2025-09-05 16:44:39] [Rank 0] step:8781/10000 train_time:363176ms step_avg:41.36ms +[2025-09-05 16:44:40] [Rank 0] step:8801/10000 train_time:363912ms step_avg:41.35ms +[2025-09-05 16:44:40] [Rank 0] step:8801/10000 train_time:363912ms step_avg:41.35ms +[2025-09-05 16:44:40] [Rank 0] step:8821/10000 train_time:364647ms step_avg:41.34ms +[2025-09-05 16:44:40] [Rank 0] step:8821/10000 train_time:364647ms step_avg:41.34ms +[2025-09-05 16:44:42] [Rank 0] step:8841/10000 train_time:366002ms step_avg:41.40ms +[2025-09-05 16:44:42] [Rank 0] step:8841/10000 train_time:366002ms step_avg:41.40ms +[2025-09-05 16:44:43] [Rank 0] step:8861/10000 train_time:366738ms step_avg:41.39ms +[2025-09-05 16:44:43] [Rank 0] step:8861/10000 train_time:366738ms step_avg:41.39ms +[2025-09-05 16:44:43] [Rank 0] step:8881/10000 train_time:367474ms step_avg:41.38ms +[2025-09-05 16:44:43] [Rank 0] step:8881/10000 train_time:367474ms step_avg:41.38ms +[2025-09-05 16:44:44] [Rank 0] step:8901/10000 train_time:368209ms step_avg:41.37ms +[2025-09-05 16:44:44] [Rank 0] step:8901/10000 train_time:368209ms step_avg:41.37ms +[2025-09-05 16:44:45] [Rank 0] step:8921/10000 train_time:368945ms step_avg:41.36ms +[2025-09-05 16:44:45] [Rank 0] step:8921/10000 train_time:368945ms step_avg:41.36ms +[2025-09-05 16:44:46] [Rank 0] step:8941/10000 train_time:369681ms step_avg:41.35ms +[2025-09-05 16:44:46] [Rank 0] step:8941/10000 train_time:369681ms step_avg:41.35ms +[2025-09-05 16:44:46] [Rank 0] step:8961/10000 train_time:370417ms step_avg:41.34ms +[2025-09-05 16:44:46] [Rank 0] step:8961/10000 train_time:370417ms step_avg:41.34ms +[2025-09-05 16:44:47] [Rank 0] step:8981/10000 train_time:371153ms step_avg:41.33ms +[2025-09-05 16:44:47] [Rank 0] step:8981/10000 train_time:371153ms step_avg:41.33ms +[2025-09-05 16:44:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 16:44:48] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 16:44:49] [Rank 0] PRINT: step:9000/10000 train_loss:1.3927 val_loss:1.3835 train_time:371969ms step_avg:41.33ms +[2025-09-05 16:44:49] [Rank 0] PRINT: step:9000/10000 train_loss:1.3927 val_loss:1.3835 train_time:371969ms step_avg:41.33ms +[2025-09-05 16:44:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:44:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:44:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:44:49] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:46:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 16:46:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 16:46:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 16:46:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 16:46:11] [Rank 0] Total Loss: 4.2244 +[2025-09-05 16:46:11] [Rank 0] Total Loss: 4.2244 +[2025-09-05 16:46:11] [Rank 0] Total FTA (Unweighted): 0.5894 +[2025-09-05 16:46:11] [Rank 0] Total FTA (Unweighted): 0.5894 +[2025-09-05 16:46:11] [Rank 0] Total FTA (Weighted): 0.5894 +[2025-09-05 16:46:11] [Rank 0] Total FTA (Weighted): 0.5894 +[2025-09-05 16:46:11] [Rank 0] Group 0 Loss: 3.4334 +[2025-09-05 16:46:11] [Rank 0] Group 0 Loss: 3.4334 +[2025-09-05 16:46:11] [Rank 0] Group 1 Loss: 3.3605 +[2025-09-05 16:46:11] [Rank 0] Group 1 Loss: 3.3605 +[2025-09-05 16:46:11] [Rank 0] Group 2 Loss: 3.3004 +[2025-09-05 16:46:11] [Rank 0] Group 2 Loss: 3.3004 +[2025-09-05 16:46:11] [Rank 0] Group 3 Loss: 3.6568 +[2025-09-05 16:46:11] [Rank 0] Group 3 Loss: 3.6568 +[2025-09-05 16:46:11] [Rank 0] Group 4 Loss: 3.7420 +[2025-09-05 16:46:11] [Rank 0] Group 4 Loss: 3.7420 +[2025-09-05 16:46:11] [Rank 0] Group 5 Loss: 3.9878 +[2025-09-05 16:46:11] [Rank 0] Group 5 Loss: 3.9878 +[2025-09-05 16:46:11] [Rank 0] Group 6 Loss: 4.0125 +[2025-09-05 16:46:11] [Rank 0] Group 6 Loss: 4.0125 +[2025-09-05 16:46:11] [Rank 0] Group 7 Loss: 4.2450 +[2025-09-05 16:46:11] [Rank 0] Group 7 Loss: 4.2450 +[2025-09-05 16:46:11] [Rank 0] Group 8 Loss: 4.4646 +[2025-09-05 16:46:11] [Rank 0] Group 8 Loss: 4.4646 +[2025-09-05 16:46:11] [Rank 0] Group 9 Loss: 4.6228 +[2025-09-05 16:46:11] [Rank 0] Group 9 Loss: 4.6228 +[2025-09-05 16:46:11] [Rank 0] Group 10 Loss: 4.8236 +[2025-09-05 16:46:11] [Rank 0] Group 10 Loss: 4.8236 +[2025-09-05 16:46:11] [Rank 0] Group 11 Loss: 4.7496 +[2025-09-05 16:46:11] [Rank 0] Group 11 Loss: 4.7496 +[2025-09-05 16:46:11] [Rank 0] Group 12 Loss: 4.7438 +[2025-09-05 16:46:11] [Rank 0] Group 12 Loss: 4.7438 +[2025-09-05 16:46:11] [Rank 0] Group 13 Loss: 4.7956 +[2025-09-05 16:46:11] [Rank 0] Group 13 Loss: 4.7956 +[2025-09-05 16:46:11] [Rank 0] Group 14 Loss: 4.8322 +[2025-09-05 16:46:11] [Rank 0] Group 14 Loss: 4.8322 +[2025-09-05 16:46:11] [Rank 0] Group 15 Loss: 4.8203 +[2025-09-05 16:46:11] [Rank 0] Group 15 Loss: 4.8203 +[2025-09-05 16:46:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 16:46:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 16:46:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 16:46:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 16:46:11] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 16:46:11] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 16:46:11] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 16:46:11] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 16:46:11] [Rank 0] Group 4 FTA: 0.9500 +[2025-09-05 16:46:11] [Rank 0] Group 4 FTA: 0.9500 +[2025-09-05 16:46:11] [Rank 0] Group 5 FTA: 0.5800 +[2025-09-05 16:46:11] [Rank 0] Group 5 FTA: 
0.5800 +[2025-09-05 16:46:11] [Rank 0] Group 6 FTA: 0.5300 +[2025-09-05 16:46:11] [Rank 0] Group 6 FTA: 0.5300 +[2025-09-05 16:46:11] [Rank 0] Group 7 FTA: 0.5500 +[2025-09-05 16:46:11] [Rank 0] Group 7 FTA: 0.5500 +[2025-09-05 16:46:11] [Rank 0] Group 8 FTA: 0.5200 +[2025-09-05 16:46:11] [Rank 0] Group 8 FTA: 0.5200 +[2025-09-05 16:46:11] [Rank 0] Group 9 FTA: 0.4300 +[2025-09-05 16:46:11] [Rank 0] Group 9 FTA: 0.4300 +[2025-09-05 16:46:11] [Rank 0] Group 10 FTA: 0.5300 +[2025-09-05 16:46:11] [Rank 0] Group 10 FTA: 0.5300 +[2025-09-05 16:46:11] [Rank 0] Group 11 FTA: 0.4000 +[2025-09-05 16:46:11] [Rank 0] Group 11 FTA: 0.4000 +[2025-09-05 16:46:11] [Rank 0] Group 12 FTA: 0.4000 +[2025-09-05 16:46:11] [Rank 0] Group 12 FTA: 0.4000 +[2025-09-05 16:46:11] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 16:46:11] [Rank 0] Group 13 FTA: 0.2600 +[2025-09-05 16:46:11] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 16:46:11] [Rank 0] Group 14 FTA: 0.1800 +[2025-09-05 16:46:11] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 16:46:11] [Rank 0] Group 15 FTA: 0.1000 +[2025-09-05 16:46:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png +[2025-09-05 16:46:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png +[2025-09-05 16:46:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png +[2025-09-05 16:46:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png +[2025-09-05 16:46:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png +[2025-09-05 16:46:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png +[2025-09-05 16:46:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png +[2025-09-05 16:46:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png +[2025-09-05 16:46:12] [Rank 0] step:9001/10000 train_time:371979ms step_avg:41.33ms +[2025-09-05 16:46:12] [Rank 0] step:9001/10000 train_time:371979ms step_avg:41.33ms +[2025-09-05 16:46:13] [Rank 0] step:9021/10000 train_time:372652ms step_avg:41.31ms +[2025-09-05 16:46:13] [Rank 0] step:9021/10000 train_time:372652ms step_avg:41.31ms +[2025-09-05 16:46:14] [Rank 0] step:9041/10000 train_time:373389ms step_avg:41.30ms +[2025-09-05 16:46:14] [Rank 0] step:9041/10000 train_time:373389ms step_avg:41.30ms +[2025-09-05 16:46:14] [Rank 0] step:9061/10000 train_time:374125ms step_avg:41.29ms +[2025-09-05 16:46:14] [Rank 0] step:9061/10000 train_time:374125ms step_avg:41.29ms +[2025-09-05 16:46:15] [Rank 0] step:9081/10000 train_time:374860ms step_avg:41.28ms +[2025-09-05 16:46:15] [Rank 0] step:9081/10000 train_time:374860ms step_avg:41.28ms +[2025-09-05 16:46:16] [Rank 0] step:9101/10000 train_time:375597ms step_avg:41.27ms +[2025-09-05 16:46:16] [Rank 0] step:9101/10000 train_time:375597ms step_avg:41.27ms +[2025-09-05 16:46:17] [Rank 0] step:9121/10000 train_time:376332ms step_avg:41.26ms +[2025-09-05 16:46:17] 
[Rank 0] step:9121/10000 train_time:376332ms step_avg:41.26ms +[2025-09-05 16:46:17] [Rank 0] step:9141/10000 train_time:377067ms step_avg:41.25ms +[2025-09-05 16:46:17] [Rank 0] step:9141/10000 train_time:377067ms step_avg:41.25ms +[2025-09-05 16:46:18] [Rank 0] step:9161/10000 train_time:377907ms step_avg:41.25ms +[2025-09-05 16:46:18] [Rank 0] step:9161/10000 train_time:377907ms step_avg:41.25ms +[2025-09-05 16:46:19] [Rank 0] step:9181/10000 train_time:378643ms step_avg:41.24ms +[2025-09-05 16:46:19] [Rank 0] step:9181/10000 train_time:378643ms step_avg:41.24ms +[2025-09-05 16:46:20] [Rank 0] step:9201/10000 train_time:379385ms step_avg:41.23ms +[2025-09-05 16:46:20] [Rank 0] step:9201/10000 train_time:379385ms step_avg:41.23ms +[2025-09-05 16:46:20] [Rank 0] step:9221/10000 train_time:380121ms step_avg:41.22ms +[2025-09-05 16:46:20] [Rank 0] step:9221/10000 train_time:380121ms step_avg:41.22ms +[2025-09-05 16:46:21] [Rank 0] step:9241/10000 train_time:380857ms step_avg:41.21ms +[2025-09-05 16:46:21] [Rank 0] step:9241/10000 train_time:380857ms step_avg:41.21ms +[2025-09-05 16:46:22] [Rank 0] step:9261/10000 train_time:381592ms step_avg:41.20ms +[2025-09-05 16:46:22] [Rank 0] step:9261/10000 train_time:381592ms step_avg:41.20ms +[2025-09-05 16:46:23] [Rank 0] step:9281/10000 train_time:382328ms step_avg:41.19ms +[2025-09-05 16:46:23] [Rank 0] step:9281/10000 train_time:382328ms step_avg:41.19ms +[2025-09-05 16:46:23] [Rank 0] step:9301/10000 train_time:383064ms step_avg:41.19ms +[2025-09-05 16:46:23] [Rank 0] step:9301/10000 train_time:383064ms step_avg:41.19ms +[2025-09-05 16:46:24] [Rank 0] step:9321/10000 train_time:383799ms step_avg:41.18ms +[2025-09-05 16:46:24] [Rank 0] step:9321/10000 train_time:383799ms step_avg:41.18ms +[2025-09-05 16:46:25] [Rank 0] step:9341/10000 train_time:384535ms step_avg:41.17ms +[2025-09-05 16:46:25] [Rank 0] step:9341/10000 train_time:384535ms step_avg:41.17ms +[2025-09-05 16:46:25] [Rank 0] step:9361/10000 train_time:385270ms step_avg:41.16ms +[2025-09-05 16:46:25] [Rank 0] step:9361/10000 train_time:385270ms step_avg:41.16ms +[2025-09-05 16:46:26] [Rank 0] step:9381/10000 train_time:386007ms step_avg:41.15ms +[2025-09-05 16:46:26] [Rank 0] step:9381/10000 train_time:386007ms step_avg:41.15ms +[2025-09-05 16:46:27] [Rank 0] step:9401/10000 train_time:386743ms step_avg:41.14ms +[2025-09-05 16:46:27] [Rank 0] step:9401/10000 train_time:386743ms step_avg:41.14ms +[2025-09-05 16:46:28] [Rank 0] step:9421/10000 train_time:387478ms step_avg:41.13ms +[2025-09-05 16:46:28] [Rank 0] step:9421/10000 train_time:387478ms step_avg:41.13ms +[2025-09-05 16:46:29] [Rank 0] step:9441/10000 train_time:388315ms step_avg:41.13ms +[2025-09-05 16:46:29] [Rank 0] step:9441/10000 train_time:388315ms step_avg:41.13ms +[2025-09-05 16:46:29] [Rank 0] step:9461/10000 train_time:389051ms step_avg:41.12ms +[2025-09-05 16:46:29] [Rank 0] step:9461/10000 train_time:389051ms step_avg:41.12ms +[2025-09-05 16:46:30] [Rank 0] step:9481/10000 train_time:389786ms step_avg:41.11ms +[2025-09-05 16:46:30] [Rank 0] step:9481/10000 train_time:389786ms step_avg:41.11ms +[2025-09-05 16:46:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 16:46:31] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 16:46:31] [Rank 0] PRINT: step:9500/10000 train_loss:1.3881 val_loss:1.3781 train_time:390603ms step_avg:41.12ms
+[2025-09-05 16:46:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:46:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:47:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:47:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:47:52] [Rank 0] Total Loss: 4.2049
+[2025-09-05 16:47:52] [Rank 0] Total FTA (Unweighted): 0.5925
+[2025-09-05 16:47:52] [Rank 0] Total FTA (Weighted): 0.5925
+[2025-09-05 16:47:52] [Rank 0] Group 0 Loss: 3.4351
+[2025-09-05 16:47:52] [Rank 0] Group 1 Loss: 3.3786
+[2025-09-05 16:47:52] [Rank 0] Group 2 Loss: 3.3218
+[2025-09-05 16:47:52] [Rank 0] Group 3 Loss: 3.6783
+[2025-09-05 16:47:52] [Rank 0] Group 4 Loss: 3.7300
+[2025-09-05 16:47:52] [Rank 0] Group 5 Loss: 3.9473
+[2025-09-05 16:47:52] [Rank 0] Group 6 Loss: 3.9897
+[2025-09-05 16:47:52] [Rank 0] Group 7 Loss: 4.1827
+[2025-09-05 16:47:52] [Rank 0] Group 8 Loss: 4.4473
+[2025-09-05 16:47:52] [Rank 0] Group 9 Loss: 4.5723
+[2025-09-05 16:47:52] [Rank 0] Group 10 Loss: 4.7934
+[2025-09-05 16:47:52] [Rank 0] Group 11 Loss: 4.7340
+[2025-09-05 16:47:52] [Rank 0] Group 12 Loss: 4.7095
+[2025-09-05 16:47:52] [Rank 0] Group 13 Loss: 4.7577
+[2025-09-05 16:47:52] [Rank 0] Group 14 Loss: 4.8081
+[2025-09-05 16:47:52] [Rank 0] Group 15 Loss: 4.7924
+[2025-09-05 16:47:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:47:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:47:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:47:52] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:47:52] [Rank 0] Group 4 FTA: 0.9500
+[2025-09-05 16:47:52] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 16:47:52] [Rank 0] Group 6 FTA: 0.5300
+[2025-09-05 16:47:52] [Rank 0] Group 7 FTA: 0.5400
+[2025-09-05 16:47:52] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 16:47:52] [Rank 0] Group 9 FTA: 0.4400
+[2025-09-05 16:47:52] [Rank 0] Group 10 FTA: 0.5400
+[2025-09-05 16:47:52] [Rank 0] Group 11 FTA: 0.4100
+[2025-09-05 16:47:52] [Rank 0] Group 12 FTA: 0.4500
+[2025-09-05 16:47:53] [Rank 0] Group 13 FTA: 0.2700
+[2025-09-05 16:47:53] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 16:47:53] [Rank 0] Group 15 FTA: 0.1000
+[2025-09-05 16:47:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:47:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:47:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:47:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:47:54] [Rank 0] step:9501/10000 train_time:390612ms step_avg:41.11ms
+[2025-09-05 16:47:55] [Rank 0] step:9521/10000 train_time:391294ms step_avg:41.10ms
+[2025-09-05 16:47:55] [Rank 0] step:9541/10000 train_time:392030ms step_avg:41.09ms
+[2025-09-05 16:47:56] [Rank 0] step:9561/10000 train_time:392766ms step_avg:41.08ms
+[2025-09-05 16:47:57] [Rank 0] step:9581/10000 train_time:393501ms step_avg:41.07ms
+[2025-09-05 16:47:58] [Rank 0] step:9601/10000 train_time:394237ms step_avg:41.06ms
+[2025-09-05 16:47:58] [Rank 0] step:9621/10000 train_time:394973ms step_avg:41.05ms
+[2025-09-05 16:47:59] [Rank 0] step:9641/10000 train_time:395709ms step_avg:41.04ms
+[2025-09-05 16:48:00] [Rank 0] step:9661/10000 train_time:396721ms step_avg:41.06ms
+[2025-09-05 16:48:01] [Rank 0] step:9681/10000 train_time:397457ms step_avg:41.06ms
+[2025-09-05 16:48:02] [Rank 0] step:9701/10000 train_time:398194ms step_avg:41.05ms
+[2025-09-05 16:48:02] [Rank 0] step:9721/10000 train_time:398929ms step_avg:41.04ms
+[2025-09-05 16:48:03] [Rank 0] step:9741/10000 train_time:399665ms step_avg:41.03ms
+[2025-09-05 16:48:04] [Rank 0] step:9761/10000 train_time:400401ms step_avg:41.02ms
+[2025-09-05 16:48:04] [Rank 0] step:9781/10000 train_time:401137ms step_avg:41.01ms
+[2025-09-05 16:48:05] [Rank 0] step:9801/10000 train_time:401874ms step_avg:41.00ms
+[2025-09-05 16:48:06] [Rank 0] step:9821/10000 train_time:402609ms step_avg:40.99ms
+[2025-09-05 16:48:07] [Rank 0] step:9841/10000 train_time:403345ms step_avg:40.99ms
+[2025-09-05 16:48:07] [Rank 0] step:9861/10000 train_time:404082ms step_avg:40.98ms
+[2025-09-05 16:48:08] [Rank 0] step:9881/10000 train_time:404817ms step_avg:40.97ms
+[2025-09-05 16:48:09] [Rank 0] step:9901/10000 train_time:405553ms step_avg:40.96ms
+[2025-09-05 16:48:10] [Rank 0] step:9921/10000 train_time:406289ms step_avg:40.95ms
+[2025-09-05 16:48:10] [Rank 0] step:9941/10000 train_time:407025ms step_avg:40.94ms
+[2025-09-05 16:48:11] [Rank 0] step:9961/10000 train_time:407762ms step_avg:40.94ms
+[2025-09-05 16:48:12] [Rank 0] step:9981/10000 train_time:408497ms step_avg:40.93ms
+[2025-09-05 16:48:13] [Rank 0] step:10000/10000 train_time:409196ms step_avg:40.92ms
+[2025-09-05 16:48:13] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:48:13] [Rank 0] PRINT: step:10000/10000 train_loss:1.3835 val_loss:1.3728 train_time:409318ms step_avg:40.93ms
+[2025-09-05 16:48:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:48:13] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:49:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:49:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:49:35] [Rank 0] Total Loss: 4.2533
+[2025-09-05 16:49:35] [Rank 0] Total FTA (Unweighted): 0.6006
+[2025-09-05 16:49:35] [Rank 0] Total FTA (Weighted): 0.6006
+[2025-09-05 16:49:35] [Rank 0] Group 0 Loss: 3.4441
+[2025-09-05 16:49:35] [Rank 0] Group 1 Loss: 3.4062
+[2025-09-05 16:49:35] [Rank 0] Group 2 Loss: 3.3038
+[2025-09-05 16:49:35] [Rank 0] Group 3 Loss: 3.7572
+[2025-09-05 16:49:35] [Rank 0] Group 4 Loss: 3.7607
+[2025-09-05 16:49:35] [Rank 0] Group 5 Loss: 4.0113
+[2025-09-05 16:49:35] [Rank 0] Group 6 Loss: 4.0399
+[2025-09-05 16:49:35] [Rank 0] Group 7 Loss: 4.2689
+[2025-09-05 16:49:35] [Rank 0] Group 8 Loss: 4.5204
+[2025-09-05 16:49:35] [Rank 0] Group 9 Loss: 4.6287
+[2025-09-05 16:49:35] [Rank 0] Group 10 Loss: 4.8651
+[2025-09-05 16:49:35] [Rank 0] Group 11 Loss: 4.7872
+[2025-09-05 16:49:35] [Rank 0] Group 12 Loss: 4.7653
+[2025-09-05 16:49:35] [Rank 0] Group 13 Loss: 4.8030
+[2025-09-05 16:49:35] [Rank 0] Group 14 Loss: 4.8557
+[2025-09-05 16:49:35] [Rank 0] Group 15 Loss: 4.8355
+[2025-09-05 16:49:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:49:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:49:35] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:49:35] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:49:35] [Rank 0] Group 4 FTA: 0.9500
+[2025-09-05 16:49:35] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 16:49:35] [Rank 0] Group 6 FTA: 0.5700
+[2025-09-05 16:49:35] [Rank 0] Group 7 FTA: 0.5400
+[2025-09-05 16:49:35] [Rank 0] Group 8 FTA: 0.5300
+[2025-09-05 16:49:35] [Rank 0] Group 9 FTA: 0.4300
+[2025-09-05 16:49:35] [Rank 0] Group 10 FTA: 0.5300
+[2025-09-05 16:49:35] [Rank 0] Group 11 FTA: 0.4300
+[2025-09-05 16:49:35] [Rank 0] Group 12 FTA: 0.4800
+[2025-09-05 16:49:35] [Rank 0] Group 13 FTA: 0.2700
+[2025-09-05 16:49:35] [Rank 0] Group 14 FTA: 0.1900
+[2025-09-05 16:49:35] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 16:49:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_loss_curves.png
+[2025-09-05 16:49:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/per_class_acc_curves.png
+[2025-09-05 16:49:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_loss_curve.png
+[2025-09-05 16:49:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/total_acc_curve.png
+[2025-09-05 16:49:36] [Rank 0] step:10001/10000 train_time:409328ms step_avg:40.93ms
+[2025-09-05 16:49:36] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 16:49:36 2025 ---
+[2025-09-05 16:49:36] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/training_log_91e420ef-804e-4af0-b5e0-0f86dd57a6e0.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/training_log_91e420ef-804e-4af0-b5e0-0f86dd57a6e0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8d5d3f5d612b0cbe9b8e6ec29daf109f2b7eb73b
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45/training_log_91e420ef-804e-4af0-b5e0-0f86dd57a6e0.txt
@@ -0,0 +1,2756 @@
+[2025-09-05 14:16:49] [Rank 0] PRINT: --- Script Start: Fri Sep 5 14:16:49 2025 ---
+[2025-09-05 14:16:49] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 14:16:49] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 14:16:49] [Rank 0] PRINT: Using fixed seed: 45
+[2025-09-05 14:16:49] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_45
+[2025-09-05 14:16:49] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
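+# The loader below expects shards in the .bin layout it asserts on: a 256-int32
+# header (magic 20240520 at header[0], version 1 at header[1], token count at
+# header[2]) followed by the tokens as uint16. A minimal writer sketch for
+# producing a compatible shard (not part of the original script; shown for
+# documentation only):
+#
+#   def write_data_shard(path, tokens_uint16):
+#       header = np.zeros(256, dtype=np.int32)
+#       header[0] = 20240520  # magic number checked by _load_data_shard
+#       header[1] = 1         # version
+#       header[2] = len(tokens_uint16)
+#       with open(path, "wb") as f:
+#           f.write(header.tobytes())
+#           f.write(np.asarray(tokens_uint16, dtype=np.uint16).tobytes())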
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycling over the shards enables multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
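+                         # (the entries below document modes that are implemented
+                         # in the mode dispatch further down in this script)
+                         "9: Pure SGD+Momentum on ALL parameters (uses --sgd_lr); "
+                         "10/13/14/15/16: additional Muon/Adam splits (see the mode dispatch below)."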
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank 
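+# Example single-node launch matching this run's configuration (the script file
+# name is hypothetical; torchrun supplies the RANK / LOCAL_RANK / WORLD_SIZE
+# environment variables read above):
+#   torchrun --nproc_per_node=1 train_gpt_qa.py --optimizer_mode 9 \
+#       --model_parameterization gated --sgd_lr 0.5 --seed 45 \
+#       --base_dir logs_qa_sgd_gated/lr_search_long
+# torch.device("cuda", local_rank) above selects this rank's GPU; local_rank is used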
for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+# run_dir_path = Path(run_dir_path_str)
+run_dir_path_str = None
+base_log_dir = Path(exp_args.base_dir)
+# Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}"
+    # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once (a second, redundant
+        # write here would duplicate every entry in the log file)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
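+# Worked example for generate_powerlaw_selection_counts (defined below), not
+# part of the original script: with m = 3 the groups come out as
+#   group 0: 1 class,   2**(3-0) = 8 samples per class
+#   group 1: 1 class,   2**(3-1) = 4 samples per class
+#   group 2: 2 classes, 2**(3-2) = 2 samples per class
+#   group 3: 4 classes, 2**(3-3) = 1 sample per class
+# i.e. selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#      class_groups     = [0, 1, 2, 2, 3, 3, 3, 3]
+# ...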
(other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_2 MLP, W_O Attn
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O Attn only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V Attn only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on QKV Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured.
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # consider adding weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps; use exp_args.muon_lr directly (the local muon_lr alias is only set in the "qkvo" branch) + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
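+    # [Editor's sketch] The determinism that makes the fixed eval set
+    # reproducible, in isolation. `demo_buckets` stands in for the per-group
+    # line indices collected by build_fixed_eval_indices above; with the same
+    # seed, the same subset is drawn on every run:
+    demo_buckets = {0: list(range(1000)), 1: list(range(1000, 1150))}
+    demo_rng = random.Random(2025)  # same default seed as build_fixed_eval_indices
+    demo_fixed = {str(g): (arr[:] if len(arr) <= PER_GROUP_K else demo_rng.sample(arr, PER_GROUP_K))
+                  for g, arr in demo_buckets.items()}
+    assert all(len(v) <= PER_GROUP_K for v in demo_fixed.values())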
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
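+            # [Editor's note] Sanity arithmetic for the divisibility warning
+            # above, using this run's config (val_tokens=491520,
+            # val_seq_len=16384) and world_size=1 (assumed for the example):
+            #   val_batch_size = 1 * 16384 = 16384
+            #   491520 % 16384 == 0  and  491520 // 16384 == 30 validation steps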
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 14:16:49] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
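+# [Editor's sketch] A minimal writer for the shard layout _load_data_shard
+# (above) expects: a 256-word int32 header (magic, version, token count)
+# followed by uint16 tokens. File name and token values here are invented
+# purely for illustration:
+import numpy as _np_demo  # numpy is already imported above as np
+_hdr = _np_demo.zeros(256, dtype=_np_demo.int32)
+_hdr[0] = 20240520                          # magic number checked by the loader
+_hdr[1] = 1                                 # supported version
+_toy_tokens = _np_demo.arange(4096, dtype=_np_demo.uint16)
+_hdr[2] = len(_toy_tokens)                  # num_tokens
+with open("toy_train_000.bin", "wb") as _f:
+    _f.write(_hdr.tobytes())
+    _f.write(_toy_tokens.tobytes())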
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write the message to the log file exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
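+    # [Editor's note] Why two FTA totals are computed below: with skewed group
+    # sizes the sample-weighted and group-averaged numbers diverge. Toy figures,
+    # assumed purely for illustration:
+    _correct = {"0": 90, "1": 1}
+    _attempted = {"0": 100, "1": 2}
+    _weighted = sum(_correct.values()) / sum(_attempted.values())      # 91/102 ~ 0.892
+    _per_group = {g: _correct[g] / _attempted[g] for g in _attempted}  # 0.90 and 0.50
+    _unweighted = sum(_per_group.values()) / len(_per_group)           # 0.70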
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
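+
+    Editor's sketch of intended usage (argument values assumed from the
+    surrounding script; the 5000 mirrors the commented-out
+    NUM_SAMPLES_FOR_DETAIL_EVAL):
+
+        avg = evaluate_per_class_loss(model_for_inference, tokenizer_for_eval,
+                                      QA_JSONL_PATH, device, m_val=M_FOR_POWERLAW,
+                                      num_samples=5000)
+        # avg maps str(group_id) -> mean loss over that group's sampled QA items
+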
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params + all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr), + dict(params=embed_params, lr=exp_args.adam_lr), + dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # consider adding weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
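+    # [Editor's sketch] The flatten-and-dedup idiom used above, in isolation.
+    # `a` and `b` stand in for nn.Parameter objects; uniqueness is by object
+    # identity, mirroring the seen_muon_ids bookkeeping:
+    a, b = object(), object()
+    mixed = [[a, b], b, None]
+    seen, flat = set(), []
+    for sub in mixed:
+        for p in (sub if isinstance(sub, list) else [sub]):
+            if p is not None and id(p) not in seen:
+                flat.append(p)
+                seen.add(id(p))
+    assert flat == [a, b]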
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # SGD + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP; Adam on QK, V Attn
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on V Attn, QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13: # Muon on W_O, W_2 MLP; Adam on the rest
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
+    elif current_optimizer_mode == 14: # Muon on W_O only
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15: # Muon on W_V only
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16: # Muon on Q, K, V Attn
+        print0(f"PRINT: Mode 16: Muon on QKV Attn. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # optionally add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Add gradient clipping for SGD mode to prevent gradient explosion
+    if exp_args.optimizer_mode == 9:
+        torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    # Muon momentum warmup: ramp from 0.85 to 0.95 over the first 300 steps
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() # computed but not currently printed
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
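For reference, the get_lr function earlier in this script implements a stable-then-decay schedule: with this run's num_iterations=10000 and cooldown_frac=0.8, the multiplier stays at 1.0 for the first 2000 steps and then falls linearly to 0.1 by the final step. A minimal standalone sketch that mirrors that logic (constants taken from the config above):

    # Standalone sketch of the get_lr multiplier with this run's constants.
    num_iterations, cooldown_frac = 10000, 0.8
    def lr_multiplier(step):
        x = min(max(step / num_iterations, 0.0), 1.0)  # clamped training progress
        if x < 1 - cooldown_frac:                      # first 20% of training: constant
            return 1.0
        w = (1 - x) / max(cooldown_frac, 1e-9)         # 1 -> 0 across the cooldown
        return w * 1.0 + (1 - w) * 0.1                 # interpolate 1.0 -> 0.1

    assert lr_multiplier(0) == 1.0 and lr_multiplier(2000) == 1.0
    print(round(lr_multiplier(6000), 3), round(lr_multiplier(10000), 3))  # 0.55 0.1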
+[2025-09-05 14:16:49] [Rank 0] PRINT: Constructing model...
+[2025-09-05 14:16:50] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-09-05 14:16:50] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-09-05 14:16:50] [Rank 0] PRINT: Testing model forward function:
+[2025-09-05 14:16:50] [Rank 0] PRINT: Model test failed:
+[2025-09-05 14:16:50] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 14:16:50] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 14:16:50] [Rank 0] PRINT: Model test still fails:
+[2025-09-05 14:16:50] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 14:16:50] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 14:16:50] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 14:16:52] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 14:16:53] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 14:16:59] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 14:16:59] [Rank 0] PRINT: Starting warmup...
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/config.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3dc985bfa8ad60cb633a2bb4ac5f6bd052bb38bd
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/config.json
@@ -0,0 +1,29 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 46,
+        "optimizer_mode": 9,
+        "model_parameterization": "gated",
+        "per_group_k": 100,
+        "muon_lr": 0.01,
+        "adam_lr": 0.001,
+        "base_dir": "logs_qa_sgd_gated/lr_search_long",
+        "sgd_lr": 0.5,
+        "m_val": 15,
+        "qa_jsonl_path": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl"
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin",
+        "val_tokens": 491520,
+        "train_seq_len": 3072,
+        "val_seq_len": 16384,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "1f1ed48d-d4b2-4629-91b3-a7158e383e32",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/fixed_eval_indices.json b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/fixed_eval_indices.json
new file mode 100644
index 0000000000000000000000000000000000000000..a823775225c5e592eb10700e5e0319b0491b1eb6
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/fixed_eval_indices.json
@@ -0,0 +1 @@
+{"1": [1238956, 182074, 1437575, 1061037, 383150, 
1176376, 926, 823011, 832520, 1266421, 512738, 144357, 848076, 890204, 213997, 95146, 261767, 467731, 832231, 217985, 913168, 107253, 1361828, 61314, 1230420, 1133619, 146690, 429587, 419151, 58695, 1579770, 503799, 1421284, 882534, 1022637, 785343, 1154604, 67783, 1325109, 243941, 1213240, 438111, 460295, 269373, 538055, 1347006, 71775, 255496, 299906, 1227973, 815402, 190082, 1304077, 1023347, 613801, 983830, 1284420, 389321, 1625224, 717538, 1172273, 992184, 1181312, 1014039, 885952, 1538489, 158933, 1667270, 1250445, 958097, 1458224, 1306495, 62945, 733843, 1360200, 540493, 762461, 501460, 1208142, 1180559, 1333588, 690481, 355756, 618511, 733586, 650301, 799437, 165533, 1238977, 323078, 1485080, 609610, 1212241, 606952, 1253407, 1420922, 327112, 701, 777907, 1626516], "0": [1390189, 1220977, 1312259, 1201125, 1235379, 1272843, 344142, 1119560, 856330, 766456, 1145928, 550624, 684681, 1309879, 54620, 1126124, 1278077, 910610, 1532044, 73645, 460165, 462723, 457959, 476877, 320873, 1408316, 989156, 1276325, 338260, 577743, 1136972, 25115, 211215, 1296818, 968705, 436781, 318224, 385325, 1516441, 533464, 1628693, 703399, 579670, 1518978, 305154, 1625960, 1400284, 713841, 1652150, 811009, 336920, 881015, 512030, 1347531, 1005706, 412167, 173312, 1136816, 343017, 537797, 1489267, 935475, 703854, 1570584, 1504269, 1458356, 1310700, 1242505, 509976, 22096, 788120, 1616850, 1591198, 1151224, 797820, 823995, 1153364, 1060220, 727674, 580729, 148912, 334290, 686098, 1633151, 1130523, 1012440, 1374480, 953410, 1381730, 1376118, 932096, 739115, 739014, 263875, 1400308, 556177, 1341771, 348626, 575350, 254846], "10": [748592, 515936, 1645691, 329828, 1104748, 1453439, 406461, 1557507, 1146644, 1398421, 163667, 228310, 407739, 585218, 785725, 1371077, 134834, 15431, 1303784, 273099, 1530332, 1320219, 1329247, 753603, 664548, 501873, 1512467, 583734, 1355643, 600440, 428998, 1426876, 121341, 1456507, 892281, 1242339, 1304508, 968336, 1593981, 634471, 1253799, 593156, 1486078, 712425, 441285, 618217, 1024661, 1395408, 1423117, 1463918, 121752, 800727, 52402, 1185143, 1651644, 1583572, 1302889, 1040220, 1234122, 333457, 1552652, 1567291, 1405501, 222708, 1469496, 875611, 894032, 783951, 934281, 109533, 198744, 643064, 197926, 224405, 1169998, 956511, 1096712, 1213224, 828780, 204659, 564225, 257079, 754287, 1598723, 821179, 1156648, 60674, 290993, 1215838, 1556010, 1025931, 687671, 252775, 771448, 574110, 578615, 279536, 1503737, 1089787, 2338], "14": [1602125, 1388977, 709154, 666138, 1446614, 572103, 99632, 25794, 1487658, 15077, 1600205, 682155, 679850, 1462049, 970283, 81336, 378196, 1627374, 832926, 1217832, 740140, 1336941, 793290, 1441127, 297494, 303372, 339524, 1363322, 525184, 593339, 61055, 1468219, 1110310, 1061855, 1046762, 410655, 449257, 530144, 1362373, 241552, 926068, 1618007, 629406, 327589, 263223, 1525290, 621372, 606355, 534601, 126046, 934024, 1181343, 1234759, 295773, 838035, 1447427, 1395837, 1568116, 486923, 465204, 98192, 1145989, 271529, 720487, 1091877, 653313, 872526, 13890, 111971, 671086, 301528, 50698, 455373, 786098, 329199, 599178, 644737, 1404606, 33829, 279256, 161938, 309352, 1537653, 21723, 309413, 478184, 340938, 1080124, 1375458, 1301528, 331423, 212589, 1220931, 112707, 454022, 1466562, 1238127, 341561, 1497264, 247245], "3": [143966, 715765, 455823, 1414629, 814361, 278667, 700211, 1286614, 1601013, 923359, 1105960, 549680, 371512, 732263, 1236795, 432942, 1379421, 488467, 679369, 1200930, 479010, 683138, 700149, 1422714, 348019, 1263383, 1044275, 820031, 
969830, 75153, 1410929, 394116, 1486865, 299414, 264214, 1416493, 428688, 107076, 753671, 674417, 1456072, 307080, 1259105, 1563344, 1198484, 456551, 1061513, 1128111, 922541, 1040277, 1360208, 1411429, 259514, 883801, 716781, 1004987, 285196, 266162, 781115, 385241, 252691, 1398963, 1440582, 1300653, 756663, 231617, 1319447, 42576, 1434337, 20658, 581284, 178420, 1626533, 694079, 1531414, 1631177, 1478512, 647215, 706568, 303323, 10540, 1010611, 1150313, 159016, 1614581, 1257722, 470491, 1333309, 94699, 756222, 1456446, 296132, 1186414, 1591257, 1155050, 789439, 1472373, 1403759, 1451276, 745193], "4": [1227524, 563788, 436964, 1380176, 1648232, 267090, 273744, 1031517, 1580586, 416846, 1309866, 114170, 1649739, 1161604, 690741, 1432068, 224570, 322835, 1559902, 479718, 709757, 410142, 1668701, 1143278, 968258, 406289, 884233, 1080971, 269480, 631196, 421463, 374502, 430292, 1309826, 1422997, 1439354, 131354, 975504, 84660, 1590390, 288064, 1419944, 695652, 320713, 260376, 222416, 937739, 1104508, 1323311, 962301, 1060820, 1117857, 280792, 692359, 656651, 358878, 828406, 598701, 127434, 1511259, 1332605, 1178325, 1281187, 203589, 792701, 684176, 543034, 980077, 982509, 640126, 1252418, 1529627, 519645, 359068, 992779, 550843, 1493488, 447632, 1512599, 361760, 626672, 89042, 604248, 574827, 87735, 1432602, 473061, 668609, 925422, 311417, 71495, 682067, 221270, 600053, 673169, 1408933, 719302, 788676, 998101, 243902], "15": [1444113, 1102795, 1149896, 1181364, 349047, 1076908, 816006, 945310, 197585, 276574, 946546, 1505399, 740223, 1154813, 1312891, 1261018, 249986, 1504592, 647427, 915615, 1245136, 1288301, 802239, 410533, 805664, 1013403, 498363, 777073, 1508176, 18111, 1147340, 880289, 215773, 924760, 1301970, 318135, 1524489, 103475, 872748, 1568512, 722849, 1364157, 838956, 1451408, 970208, 1350756, 1498244, 980461, 1098677, 1141812, 463859, 95672, 1220365, 554451, 524014, 1161048, 17478, 113548, 1027741, 1657643, 1462809, 1279104, 85588, 241936, 510707, 151817, 1191227, 574942, 735662, 1046248, 1056729, 96279, 781378, 1042711, 1602339, 1503514, 299087, 898968, 553537, 293167, 897565, 572125, 425215, 916907, 1210531, 1013275, 201532, 883447, 1641610, 1185682, 425791, 831133, 1291227, 1099064, 934983, 588125, 1284591, 1163873, 524656, 1230762], "13": [1122510, 535854, 845319, 585144, 884184, 441130, 934654, 1046629, 1512962, 1190830, 1462685, 90530, 894615, 1527783, 415323, 90852, 1556905, 1002406, 366356, 1386022, 846952, 928360, 3138, 406205, 78588, 1208032, 1210206, 257177, 137047, 711380, 693628, 105893, 392053, 1301525, 677934, 1549630, 1134520, 447564, 1194061, 672969, 740755, 1045536, 633455, 1336343, 613147, 1161931, 234095, 1246643, 643373, 834221, 1351442, 1502069, 1401555, 759927, 1320337, 666063, 291222, 956633, 400583, 963313, 683474, 534882, 1101312, 1519551, 1513194, 752124, 1028953, 1383362, 169732, 785294, 160568, 1053580, 774534, 372048, 292049, 791470, 1318537, 141966, 510573, 1440830, 930544, 1352026, 463299, 1245306, 159755, 1566413, 316835, 1135200, 266962, 112892, 253603, 1328380, 703630, 21798, 268463, 7112, 1232576, 374978, 240715, 1299198], "2": [463910, 845243, 985993, 402000, 962033, 473711, 108588, 1074932, 539045, 1162793, 1086008, 778638, 751876, 53650, 465364, 803834, 820717, 9490, 473708, 26912, 970677, 1441139, 231484, 749139, 286603, 1346834, 1641988, 699767, 816768, 1220504, 978431, 1331006, 611334, 829473, 271532, 566760, 332292, 1326050, 887731, 597530, 940973, 491159, 1150471, 122010, 225465, 382413, 1207486, 1568920, 992793, 896409, 1479386, 
1539347, 759024, 293232, 1345256, 1282360, 956014, 371580, 1202087, 1638038, 892501, 38073, 1651323, 1103388, 1315239, 79005, 1045045, 1230831, 1206253, 1376976, 140037, 320204, 274836, 663525, 397448, 666367, 1501713, 570453, 836424, 694423, 157979, 279074, 1235605, 536170, 301830, 778556, 1069897, 181207, 1445408, 1082031, 969552, 1083538, 1395871, 168187, 5476, 514815, 194418, 1038889, 1288748, 1535767], "8": [258910, 518286, 80108, 811775, 937854, 1623550, 1592007, 661848, 1484674, 300, 104226, 1075215, 1190104, 8885, 1062956, 88002, 1020623, 189243, 1265153, 1472857, 301412, 21371, 441401, 1078954, 1648642, 894192, 1083738, 1182805, 1347655, 316568, 1063971, 255177, 186613, 550617, 102173, 563694, 465966, 965224, 372481, 929520, 1423441, 6575, 862309, 144894, 891435, 123628, 653301, 1378790, 1247156, 1129087, 1381991, 410876, 391895, 202136, 868149, 233421, 564464, 1174972, 1385073, 238326, 483488, 507729, 1329666, 955266, 479623, 902401, 1343890, 827390, 371073, 1480060, 1241650, 1670957, 644144, 1371709, 1324243, 670126, 23715, 1566067, 70858, 522312, 558100, 301714, 359605, 1401913, 1060057, 1027855, 987700, 720084, 324937, 562646, 390910, 1398541, 1097298, 1626769, 1461630, 1430950, 650075, 1025475, 274735, 650932], "7": [181761, 629797, 231148, 365747, 1229714, 500408, 198146, 500703, 1250728, 1578820, 753672, 84476, 1379367, 355424, 1591855, 1400949, 1194404, 912060, 884197, 320928, 1096692, 714201, 331003, 1345763, 1620998, 457231, 1542074, 1108477, 1602113, 641593, 542514, 159345, 360278, 35579, 506, 470465, 1606024, 1349783, 731258, 382125, 883800, 1652746, 382071, 99241, 20216, 1398849, 245155, 1601928, 1544592, 1632383, 1597152, 1266758, 1032571, 88287, 521379, 1423306, 579207, 1152743, 271926, 418168, 8827, 1358019, 429579, 380995, 1040420, 1067138, 283335, 1150469, 280745, 1184794, 468245, 948227, 357499, 527525, 1067827, 816862, 360563, 433490, 258517, 581805, 1089641, 1537081, 473494, 1480784, 60701, 686908, 614323, 143069, 738219, 1234467, 927103, 64088, 658420, 1353941, 1517049, 753334, 198407, 434613, 850731, 1211609], "5": [1250448, 711876, 285881, 342500, 987883, 195047, 810508, 356140, 999602, 284542, 730936, 736191, 661291, 11822, 551928, 617679, 384856, 414906, 238071, 473605, 868498, 1269087, 558407, 245083, 810169, 1456559, 1657433, 70018, 865238, 416303, 1389734, 1561961, 534163, 1098802, 1494214, 1285484, 599936, 296819, 696611, 307176, 29697, 731071, 257656, 1465441, 1668863, 166995, 1668377, 1457645, 1400335, 1525120, 33323, 437285, 459069, 620176, 231704, 846424, 1391144, 364636, 63552, 1093702, 67741, 285428, 454922, 719128, 1393391, 87651, 1418561, 1312176, 652985, 1446762, 71280, 1594253, 89848, 628629, 1117344, 882598, 1014283, 1601256, 739004, 508965, 1273914, 44245, 1267971, 1430470, 611385, 930900, 1439585, 1545682, 1300966, 1331569, 364367, 27900, 107616, 1579070, 94723, 818780, 1183546, 384575, 142627, 431847], "11": [12772, 1091320, 1136723, 560663, 715094, 1097800, 1016547, 1299126, 961155, 725788, 999768, 1277062, 650665, 1323964, 1024796, 1255124, 215111, 268904, 442949, 358645, 621958, 1666210, 1154349, 375517, 614870, 1166618, 1418680, 795635, 184985, 129300, 129366, 1343289, 1151524, 193531, 634856, 1618906, 43721, 1601844, 591772, 1322095, 1522284, 431856, 574972, 1544663, 1389257, 1113893, 1515857, 614345, 809163, 132406, 95327, 997226, 423104, 910705, 261584, 765030, 1216397, 439297, 1565563, 1595785, 1622521, 975588, 1314965, 1205327, 1501612, 1642358, 656493, 1369509, 195325, 1554913, 1661675, 790468, 511387, 1227059, 1219143, 
1118001, 788451, 697651, 1575073, 1345104, 167862, 1409843, 138942, 984075, 1382364, 1299538, 214601, 1541475, 459076, 353219, 1310728, 757670, 904351, 334019, 1003259, 670104, 10386, 13556, 1097931, 1526407], "6": [295849, 644897, 1207883, 1648549, 1478750, 1486750, 1156499, 621047, 1387936, 662848, 1362507, 891859, 1188350, 1298200, 1044010, 767288, 1191230, 683760, 533545, 498960, 1421819, 366846, 54389, 1382045, 1002942, 1285004, 457209, 1074763, 823805, 1508295, 719970, 837720, 19881, 1189285, 1226048, 1078656, 1433699, 14805, 254682, 614235, 805087, 900964, 343832, 561709, 243288, 465867, 497085, 1153456, 730115, 42050, 878511, 1382508, 667820, 712856, 715963, 1499782, 1516002, 444399, 726168, 1204910, 591639, 986194, 107160, 1596752, 1239812, 591854, 536489, 1347905, 1597670, 122237, 620494, 354940, 1561646, 1352115, 74785, 163277, 590349, 1429584, 1440950, 848184, 1580723, 1631128, 979479, 758861, 84655, 1079672, 673692, 1315324, 167985, 1353485, 21815, 1175739, 1360408, 1062086, 1369999, 733863, 1371964, 61279, 1152056, 220910], "9": [780278, 1156801, 827882, 1095960, 874763, 1330006, 979412, 1154745, 1399441, 515095, 1505401, 1454104, 1563117, 1316054, 57098, 43367, 1175749, 1257252, 865038, 624410, 186727, 907758, 126168, 1353431, 167806, 1021404, 1585615, 846939, 552247, 1400471, 68525, 190495, 1556857, 181528, 47677, 136468, 189446, 269049, 262467, 503841, 1367168, 382007, 486946, 621235, 622683, 469399, 1537192, 707594, 1003091, 56021, 1354302, 799290, 1618221, 696794, 836658, 1591562, 251096, 1321886, 412122, 1352217, 946767, 1267324, 213504, 747174, 1403653, 1589967, 849743, 549269, 663800, 1315189, 451416, 1330881, 1352197, 1517840, 827169, 1228725, 1097725, 955944, 1375509, 1590213, 266039, 1558465, 1498493, 604161, 608723, 1547013, 1484549, 1530909, 1325336, 1584866, 630737, 1023780, 227549, 961233, 987591, 159267, 608424, 636687, 745397, 460687], "12": [1586709, 357745, 1274743, 202991, 1250469, 837216, 757864, 185235, 493474, 621351, 22706, 1096491, 186138, 1482718, 534525, 1157131, 1350934, 1167912, 1366603, 756644, 311945, 72417, 706444, 1464146, 1236814, 74555, 983713, 1182807, 1082263, 804850, 1028788, 1405036, 932746, 742978, 1589076, 723766, 898380, 906214, 913629, 33386, 256107, 1245317, 360574, 836359, 565572, 400905, 1269191, 1042959, 886332, 1415571, 68870, 470376, 1531, 1333957, 834102, 674685, 839119, 255119, 552181, 1239034, 490552, 468715, 963242, 1612174, 21991, 1157603, 975554, 1342832, 883818, 1475260, 1666539, 1252155, 986632, 906429, 1495805, 561464, 1219097, 567704, 1463586, 1571831, 689821, 481310, 1407520, 97442, 512704, 962995, 876670, 956570, 1292534, 914981, 1655195, 870222, 196978, 617479, 1610748, 128227, 213356, 1590232, 230140, 942835]} \ No newline at end of file diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..6a5f350bd270dc64420d42cf4e73abd09eaa31d9 --- /dev/null +++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14759eba413747762bf416536d97fbf195186ea7b5451522a57703768fa8a8e8 +size 423019 diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png new file mode 
100644
index 0000000000000000000000000000000000000000..492ffeee6e63505ab61b0f0c01506c2450ff7206
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc60677a4ffa4631b5123dd5665346162d457f0d62e5786c69525162c5336abb
+size 481147
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..4e16d2eb4dea46ebed4c577bbd9cbd49e2395799
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85352a87d4c45f1fc2411733c2624f1a47a43bc74df2c492a055a577e0cde996
+size 94335
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..3dbfffaa221b24066c1b010eba91cbb56fc5778b
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc17fa2c8833f0548816f0b35fae5669998125e1999288b8eecfa215399e7d23
+size 118890
diff --git a/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/training_log_1f1ed48d-d4b2-4629-91b3-a7158e383e32.txt b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/training_log_1f1ed48d-d4b2-4629-91b3-a7158e383e32.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f77f1ea10290bd315544eac62411b00b24358247
--- /dev/null
+++ b/logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/training_log_1f1ed48d-d4b2-4629-91b3-a7158e383e32.txt
@@ -0,0 +1,5614 @@
+[2025-09-05 16:50:00] [Rank 0] PRINT: --- Script Start: Fri Sep 5 16:50:00 2025 ---
+[2025-09-05 16:50:00] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=9, model_parameterization='gated', per_group_k=100, muon_lr=0.01, adam_lr=0.001, base_dir='logs_qa_sgd_gated/lr_search_long', sgd_lr=0.5, m_val=15, qa_jsonl_path='/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl')
+[2025-09-05 16:50:00] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-09-05 16:50:00] [Rank 0] PRINT: Using fixed seed: 46
+[2025-09-05 16:50:00] [Rank 0] PRINT: Run directory: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46
+[2025-09-05 16:50:00] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
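_load_data_shard expects a fixed on-disk layout: 256 int32 header words (magic 20240520, format version 1, then the token count) followed by the tokens themselves as raw uint16. A hypothetical companion writer, shown only to document that layout (write_data_shard is not part of the logged script; it reuses the numpy import above):

    # Sketch of a writer matching the .bin shard layout read by _load_data_shard.
    def write_data_shard(path, tokens):
        header = np.zeros(256, dtype=np.int32)
        header[0] = 20240520    # magic number checked by the reader
        header[1] = 1           # format version
        header[2] = len(tokens) # number of uint16 tokens that follow the header
        with open(path, "wb") as f:
            f.write(header.tobytes())                              # 256 * 4 bytes
            f.write(np.asarray(tokens, dtype=np.uint16).tobytes()) # 2 bytes per token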
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle re-reads the shards, so iteration continues across epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. 
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn, MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn, MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (no Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP); "
+                         "9: pure SGD+momentum on all params (uses --sgd_lr); "
+                         "10: Muon(O Attn, MLP)/Adam(QK, V Attn); "
+                         "13: Muon(W_O, W_2 MLP)/Adam(QK, V Attn, W_1 MLP); "
+                         "14: Muon(W_O)/Adam(QK, V Attn, MLP); "
+                         "15: Muon(W_V)/Adam(QK, O Attn, MLP); "
+                         "16: Muon(QKV Attn)/Adam(O Attn, MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo", "gated"])
+parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group")
+parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.")
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs")
+parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).")
+parser.add_argument("--m_val", type=int, default=15,
+                    help="Power-law exponent m used by the dataset generator.")
+parser.add_argument("--qa_jsonl_path", type=str,
+                    default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl",
+                    help="Path to the QA jsonl used for evaluation (fixed eval set).")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+M_FOR_POWERLAW: int = exp_args.m_val
+QA_JSONL_PATH: str = exp_args.qa_jsonl_path
+PER_GROUP_K: int = exp_args.per_group_k
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "gated":
+    print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention")
+    from models.nano_GPT_gated import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence 
length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, 
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once (the original wrote it a
+        # second time unconditionally, duplicating every line and crashing when
+        # logfile was still None)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    #with open(qa_data_path, 'r', encoding='utf-8') as f:
+    #    qa_data = [json.loads(line) for line in f]
+
+    #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+    #    print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+    #    data_by_class = defaultdict(list)
+    #    for item in qa_data: data_by_class[item['class_id']].append(item)
+    #    sample_ratio = num_samples / len(qa_data)
+    #    stratified_sample_data = []
+    #    for class_id, items in data_by_class.items():
+    #        num_to_sample = max(1, int(len(items) * sample_ratio))
+    #        sampled_items = random.sample(items, min(len(items), num_to_sample))
+    #        stratified_sample_data.extend(sampled_items)
+    #    qa_data = stratified_sample_data
+    #    print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    qa_data = []
+    if fixed_indices is not None:
+        needed = set()
+        for arr in fixed_indices.values():
+            needed.update(arr)
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            for idx, line in enumerate(f):
+                if idx in needed:
+                    try:
+                        qa_data.append(json.loads(line))
+                    except Exception:
+                        continue
+        print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True)
+    else:
+        with open(qa_data_path, 'r', encoding='utf-8') as f:
+            qa_data = [json.loads(line) for line in f]
+        print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True)
+
+
+    # 2. 
Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+
+########################################
+#    Construct model and optimizer     #
+########################################
+
+print0("PRINT: Constructing model...", console=True)
+if exp_args.model_parameterization == "qkvo":
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+elif exp_args.model_parameterization == "gated":
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+else:
+    # Bug fix: the original script constructed no model for the "whole"
+    # parameterization (or --unet), so `model` was undefined below.  The layer
+    # count here mirrors the qkvo branch and is an assumption.
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original (uncompiled) model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks) # target_seq=None
+        model.train()
+
+        if isinstance(result, tuple) and len(result) == 2:
+            loss, logits = result
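+            # (Added summary, inferred from the call sites in this script) These
+            # probes pin down the forward contract relied on elsewhere:
+            #   model(input_seq, target_seq, window_blocks)  -> scalar loss
+            #     (used by evaluate_per_class_loss and the training loop)
+            #   model(input_seq, None, window_blocks)        -> logits, or a
+            #     (loss, logits) tuple unpacked as below
+            #     (used by run_detailed_evaluation for the FTA computation)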
+            print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+        else:
+            print0(f"PRINT: Model returns: {type(result)}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test still fails: {e}", console=True)
+
+
+
+# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+if exp_args.model_parameterization == "qkvo":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+    muon_lr = exp_args.muon_lr
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+        print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True)
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
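+    # Quick-reference for the remaining branches (added summary; the code below is
+    # authoritative). Modes 11 and 12 are not defined and fall through to the
+    # ValueError at the end of this chain:
+    #   mode 14: Muon(W_O)      / Adam(QK Attn, V Attn, all MLP)
+    #   mode 15: Muon(W_V)      / Adam(QK Attn, W_O, all MLP)
+    #   mode 16: Muon(Q, K, V)  / Adam(W_O, all MLP)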
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    elif current_optimizer_mode == 9: # sgd + momentum
+        # This mode uses SGD with momentum for all parameters, no Muon or Adam
+        print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True)
+        all_params = list(model.parameters())
+        sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument
+        optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4)
+        optimizer2 = None
+        optimizers = [optimizer1]
+    elif current_optimizer_mode == 10: # Muon on O Attn, MLP
+        print0(f"PRINT: Mode 10: Muon on O Attn, MLP. Adam on QK Attn, V Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + all_mlp_matrices
+        adam_matrix_target_list = attn_v_params + attn_qk_group
+    elif current_optimizer_mode == 13:
+        print0(f"PRINT: Mode 13: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group
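+    # Note on the gated parameterization (added summary; see the group definitions
+    # above): the gated MLP has two input projections, c_fc and c_up, both of which
+    # belong to mlp_w1_group, while c_proj is W_2.  A "W_1" mode in this branch
+    # therefore moves two matrices per block rather than one as in the qkvo branch.
+    # The remaining branches mirror the qkvo ones: mode 14 Muon(W_O), mode 15
+    # Muon(W_V), mode 16 Muon(Q, K, V); modes 11/12 are undefined (ValueError).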
+    elif current_optimizer_mode == 14:
+        print0(f"PRINT: Mode 14: Muon on W_O. Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_o_params
+        adam_matrix_target_list = attn_qk_group + attn_v_params + all_mlp_matrices
+    elif current_optimizer_mode == 15:
+        print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params
+        adam_matrix_target_list = attn_qk_group + attn_o_params + all_mlp_matrices
+    elif current_optimizer_mode == 16:
+        print0(f"PRINT: Mode 16: Muon on QKV. Adam on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_v_params + attn_qk_group
+        adam_matrix_target_list = attn_o_params + all_mlp_matrices
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Skip Adam and Muon setup for SGD mode (9)
+    if current_optimizer_mode != 9:
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                # Bug fix: use exp_args.muon_lr directly; the local muon_lr alias is
+                # only bound in the qkvo branch above, so referencing it here raised
+                # a NameError for gated runs that actually create a Muon optimizer.
+                optimizer2 = Muon(flat_unique_muon_params, lr=exp_args.muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
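+        # (Added sketch, not executed in the original run) A useful invariant of the
+        # grouping above is that no parameter is owned by two optimizers; one could
+        # verify it at this point with something like:
+        #     owned = [id(p) for opt in optimizers for g in opt.param_groups for p in g["params"]]
+        #     assert len(owned) == len(set(owned)), "parameter assigned to two optimizer groups"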
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    # NOTE: this branch keeps the original hard-coded LRs and ignores --muon_lr/--adam_lr
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# Learning rate schedule: stable, then linear cooldown to 0.1x over the final
+# cooldown_frac of training.  Worked example with cooldown_frac = 0.8: the
+# multiplier is 1.0 until x = 0.2, then w = (1 - x) / 0.8 decays it linearly,
+# reaching 0.55 at x = 0.6 and bottoming out at 0.1 when x = 1.0.
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert; it would fail on the last step when step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS): the window grows from 128 tokens
+# to 1728 tokens over the course of training, in multiples of the 128-token block
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the eager 'model'; 'model_for_inference' keeps the uncompiled reference for evaluation
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+#            Warmup kernels            #
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in 
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 16:50:00] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import 
defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory_copy/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. 
" + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo","gated"]) +parser.add_argument("--per_group_k", type=int, default=100, help="Number of samples per group") +parser.add_argument("--muon_lr", type=float, default=0.01, help="Learning rate for Muon optimizer.") +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") +parser.add_argument("--base_dir", type=str, default="logs_all_0821/gated", help="Base directory for logs") +parser.add_argument("--sgd_lr", type=float, default=0.01, help="Learning rate for SGD optimizer (used in mode 9).") +parser.add_argument("--m_val", type=int, default=15, + help="Power-law exponent m used by the dataset generator.") +parser.add_argument("--qa_jsonl_path", type=str, + default="/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15.jsonl", + help="Path to the QA jsonl used for evaluation (fixed eval set).") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +M_FOR_POWERLAW: int = exp_args.m_val +QA_JSONL_PATH: str = exp_args.qa_jsonl_path +PER_GROUP_K: int = exp_args.per_group_k + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "gated": + print("Using architecture (models.nano_gpt_gated) with GatedSelfAttention") + from models.nano_GPT_gated import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory_copy/qa_m15/qa_tail_m15_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 491520 + train_seq_len = 3*1024 + val_seq_len = 4*4*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are 
correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +# run_dir_path_str = f"/home/wangshuche/MUON_theory/modded-nanogpt/logs_bios/qa/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +# run_dir_path = Path(run_dir_path_str) +run_dir_path_str = None +base_log_dir = Path(exp_args.base_dir) +# Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.sgd_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.muon_lr}_seed_{exp_args.seed}" + # run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message 
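+# --- ILLUSTRATIVE USAGE (hypothetical, review annotation): RANK, LOCAL_RANK and
+# WORLD_SIZE above are the environment variables torchrun exports, so a launch
+# matching the run logged at the end of this file could look like (the script
+# name is a placeholder; 4 ranks is inferred from val_batch_size = 65536 in
+# that log):
+#     torchrun --nproc_per_node=4 train_gpt_qa.py \
+#         --optimizer_mode 9 --model_parameterization gated \
+#         --sgd_lr 0.5 --seed 46 --base_dir logs_qa_sgd_gated/lr_search_long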
+ "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, fixed_indices=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + #with open(qa_data_path, 'r', encoding='utf-8') as f: + # qa_data = [json.loads(line) for line in f] + + #if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + # print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + # data_by_class = defaultdict(list) + # for item in qa_data: data_by_class[item['class_id']].append(item) + # sample_ratio = num_samples / len(qa_data) + # stratified_sample_data = [] + # for class_id, items in data_by_class.items(): + # num_to_sample = max(1, int(len(items) * sample_ratio)) + # sampled_items = random.sample(items, min(len(items), num_to_sample)) + # stratified_sample_data.extend(sampled_items) + # qa_data = stratified_sample_data + # print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + qa_data = [] + if fixed_indices is not None: + needed = set() + for arr in fixed_indices.values(): + needed.update(arr) + with open(qa_data_path, 'r', encoding='utf-8') as f: + for idx, line in enumerate(f): + if idx in needed: + try: + qa_data.append(json.loads(line)) + except Exception: + continue + print0(f"PRINT: Fixed-eval set loaded with {len(qa_data)} samples.", console=True) + else: + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + print0(f"PRINT: WARNING: fixed_indices is None; using all {len(qa_data)} samples (may reintroduce jitter).", console=True) + + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
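+# --- WORKED EXAMPLE (review annotation): generate_powerlaw_selection_counts(m)
+# above puts 1 class in group 0 and 2**(g-1) classes in each group g >= 1, with
+# 2**(m-g) samples per class. For m = 3:
+#     group 0: 1 class   x 8 samples
+#     group 1: 1 class   x 4 samples
+#     group 2: 2 classes x 2 samples
+#     group 3: 4 classes x 1 sample
+# giving 2**m = 8 classes and 2**m + m * 2**(m-1) = 20 samples; for the m = 15
+# used here that is 32768 classes and 278528 samples.
+#     counts, groups = generate_powerlaw_selection_counts(3)
+#     assert len(counts) == 8 and sum(counts.values()) == 20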
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
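+# --- WORKED EXAMPLE (review annotation): the padding above rounds each item up
+# to a multiple of BLOCK_SIZE = 128 (FlexAttention operates on 128-token
+# blocks) and caps it at max_eval_len = 4096; positions past the real tokens
+# get target -100, which F.cross_entropy(..., ignore_index=-100) excludes from
+# the loss. For a 300-token item:
+#     padded_len    = ((300 + 128 - 1) // 128) * 128   # -> 384
+#     window_blocks = 384 // 128                       # -> 3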
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + + # Two methods for calculating total accuracy + total_acc_weighted = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 # Original method: weighted by samples + total_acc_unweighted = sum(avg_group_acc.values()) / len(avg_group_acc) if avg_group_acc else 0 # New method: simple average across groups + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc_weighted': total_acc_weighted, # Sample-weighted total accuracy + 'total_acc_unweighted': total_acc_unweighted, # Simple average total accuracy across groups + 'total_acc': total_acc_unweighted # Primarily use simple average method + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + + + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
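+# --- WORKED EXAMPLE (review annotation): the two totals above diverge when
+# groups are unbalanced. If group 0 has 100 samples at 90% FTA and group 5 has
+# 10 samples at 10% FTA:
+#     total_acc_weighted   = (90 + 1) / (100 + 10)   # ~0.83, dominated by the head group
+#     total_acc_unweighted = (0.9 + 0.1) / 2         # 0.50, every group counts equally
+# The fixed eval set built later samples up to PER_GROUP_K items per group,
+# which keeps both totals comparable from step to step.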
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
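+# --- WORKED EXAMPLE (review annotation): the stratified sampling above keeps
+# at least one item per class via max(1, ...). Assuming the m = 15 power law
+# (278528 lines) and num_samples = 5000:
+#     sample_ratio = 5000 / 278528                 # ~0.018
+#     head_class   = int(16384 * sample_ratio)     # ~294 items sampled
+#     tail_class   = max(1, int(1 * sample_ratio)) # 1 item, never dropped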
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## + +print0("PRINT: Constructing model...", console=True) +if exp_args.model_parameterization == "qkvo": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +elif exp_args.model_parameterization == "gated": + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=10, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, tuple) and len(result) == 2: + loss, logits 
= result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + muon_lr = exp_args.muon_lr + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
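+# --- REVIEW SUMMARY of the mode dispatch below (embeddings, lm_head and scalar
+# params always go to Adam in every mode; W_1/W_2 are the MLP in/out matrices):
+#     mode 0 : Muon on QKVO attn + all MLP   | Adam: no extra matrices
+#     mode 1 : Muon on QK                    | Adam: VO + MLP
+#     mode 2 : Muon on VO                    | Adam: QK + MLP
+#     mode 3 : Muon on QKVO                  | Adam: MLP
+#     mode 4 : Muon on MLP                   | Adam: QKVO
+#     mode 5 : Muon unused                   | Adam: all matrices
+#     mode 6 : Muon on W_2                   | Adam: attn + W_1
+#     mode 7 : Muon on VO + MLP              | Adam: QK
+#     mode 8 : Muon on VO + W_2              | Adam: QK + W_1
+#     mode 9 : plain SGD + momentum on every parameter (no Adam, no Muon)
+#     mode 10: Muon on W_O + MLP             | Adam: V + QK
+#     mode 13: Muon on W_O + W_2             | Adam: QK + V + W_1
+#     mode 14: Muon on W_O                   | Adam: QK + V + MLP
+#     mode 15: Muon on W_V                   | Adam: QK + W_O + MLP
+#     mode 16: Muon on QKV                   | Adam: W_O + MLP
+# Note the log strings for modes 13 and 16 misreport their mode numbers
+# ("Mode 32" and "Mode 15"); the parameter assignments are as listed here.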
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + print0(f"PRINT: SGD optimizer configured with lr={sgd_lr}, momentum=0.9, weight_decay=1e-4", console=True) + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
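+# --- ILLUSTRATIVE SKETCH (review annotation): the id()-based flattening above
+# guards against the same weight tensor appearing twice when the per-mode
+# lists are concatenated; a standalone version of the idiom (names
+# hypothetical):
+#     def unique_params(candidates):
+#         seen, out = set(), []
+#         for p in candidates:
+#             if p is not None and id(p) not in seen:
+#                 seen.add(id(p))
+#                 out.append(p)
+#         return out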
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "gated" : + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + mlp_up_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + mlp_up_params.append(block_module.mlp.c_up.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_up_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params+ mlp_up_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
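+# --- REVIEW NOTE: in this gated branch each MLP block has three matrices
+# (c_fc, c_up, c_proj), so W_1 = mlp_fc_params + mlp_up_params includes the
+# gate, while W_2 is still c_proj alone; the mode dispatch otherwise mirrors
+# the qkvo branch above.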
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + elif current_optimizer_mode == 9: # sgd + momentum + # This mode uses SGD with momentum for all parameters, no Muon or Adam + print0(f"PRINT: Mode 9: Using pure SGD+Momentum (lr={exp_args.sgd_lr}).", console=True) + all_params = list(model.parameters()) + sgd_lr = exp_args.sgd_lr # Use learning rate from command line argument + optimizer1 = torch.optim.SGD(all_params, lr=sgd_lr, momentum=0.9, weight_decay=1e-4) + optimizer2 = None + optimizers = [optimizer1] + elif current_optimizer_mode == 10: # Muon on O Attn, MLP + print0(f"PRINT: Mode 10: Muon on O Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + all_mlp_matrices + adam_matrix_target_list = attn_v_params + attn_qk_group + elif current_optimizer_mode == 13: + print0(f"PRINT: Mode 32: Muon on W_2, W_O. Adam on V Attn, QK Attn, W_1 (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + mlp_w2_group + adam_matrix_target_list = attn_qk_group + attn_v_params + mlp_w1_group + elif current_optimizer_mode == 14: + print0(f"PRINT: Mode 14: Muon on W_O. 
Adam on V Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_o_params + adam_matrix_target_list = attn_qk_group + attn_v_params +all_mlp_matrices + elif current_optimizer_mode == 15: + print0(f"PRINT: Mode 15: Muon on W_V. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + adam_matrix_target_list = attn_qk_group + attn_o_params +all_mlp_matrices + elif current_optimizer_mode == 16: + print0(f"PRINT: Mode 15: Muon on QKV. Adam on O Attn, QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_v_params + attn_qk_group + adam_matrix_target_list = attn_o_params +all_mlp_matrices + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Skip Adam and Muon setup for SGD mode (9) + if current_optimizer_mode != 9: + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=muon_lr, momentum=0.95, nesterov=False, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. 
Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in 
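+# --- WORKED EXAMPLE (review annotation) for the schedules above, with
+# num_iterations = 10000 and cooldown_frac = 0.8: the LR multiplier is 1.0 for
+# the first 2000 steps, then decays linearly toward 0.1. At step 6000:
+#     x    = 6000 / 10000             # 0.6, inside the cooldown window
+#     w    = (1 - 0.6) / 0.8          # 0.5
+#     mult = w * 1.0 + (1 - w) * 0.1  # 0.55
+# The attention window grows alongside training: at the same step,
+#     max(128, next_multiple_of_n(1728 * 0.6, n=128))   # -> 1152 tokens, 9 blocks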
model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + # Add gradient clipping for SGD mode in warmup too + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) +train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + + + # ===== [ADD] Fixed eval set (per-group equal sampling) ===== + FIXED_VAL_INDEX_PATH = run_dir_path / "fixed_eval_indices.json" + #PER_GROUP_K = 100 # Number of samples per group + + def _is_valid_qa_text_for_fta(text: str) -> bool: + # Quick filtering for building fixed eval set, ensure parseable "?" + "Answer:" + if not isinstance(text, str): + return False + return re.search(r'^(.*?\?)\s*Answer\s*:\s*(.+)$', text, re.IGNORECASE) is not None + + def build_fixed_eval_indices(jsonl_path, class_to_group_map, per_group_k, seed=2025): + rng = random.Random(seed) + # Build buckets by group_id for each line, but only collect samples that can be parsed for FTA + buckets = defaultdict(list) # gid -> [line_idx, ...] + with open(jsonl_path, "r", encoding="utf-8") as f: + for i, line in enumerate(f): + try: + item = json.loads(line) + except Exception: + continue + gid = class_to_group_map.get(item.get("class_id")) + if gid is None: + continue + if not _is_valid_qa_text_for_fta(item.get("text", "")): + continue + buckets[gid].append(i) + + fixed = {} + for gid, arr in buckets.items(): + if len(arr) <= per_group_k: + fixed[str(gid)] = arr[:] # Take all if fewer than K samples + else: + fixed[str(gid)] = rng.sample(arr, per_group_k) + return fixed + + # You already have: QA_JSONL_PATH / M_FOR_POWERLAW + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map_global = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + if not FIXED_VAL_INDEX_PATH.exists(): + fixed_idx = build_fixed_eval_indices(QA_JSONL_PATH, class_to_group_map_global, PER_GROUP_K) + with open(FIXED_VAL_INDEX_PATH, "w") as f: + json.dump(fixed_idx, f) + print0(f"PRINT: Built fixed eval set. 
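+# --- ILLUSTRATIVE SKETCH (review annotation): fixed_eval_indices.json written
+# above maps each group id (as a string) to at most PER_GROUP_K line numbers
+# into the QA jsonl; with per_group_k = 3 it could look like this (indices
+# invented for illustration):
+#     {"0": [12, 345, 6789], "1": [23, 456, 7891]}
+# run_detailed_evaluation then parses only those line indices, so every eval
+# step scores the identical per-group subset instead of a fresh random sample.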
Saved to {FIXED_VAL_INDEX_PATH}", console=True) + else: + print0(f"PRINT: Using existing fixed eval set: {FIXED_VAL_INDEX_PATH}", console=True) + # --- FIX: Load the indices if the file already exists --- + with open(FIXED_VAL_INDEX_PATH, "r") as f: + fixed_idx = json.load(f) + # ===== [END ADD] ===== + + # ------------------------------------ + #QA_JSONL_PATH = "/home/wangshuche/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail_m15.jsonl" + #M_FOR_POWERLAW = 15 + #NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
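+# --- WORKED EXAMPLE (review annotation) for the divisibility warning above:
+# with val_tokens = 491520, val_seq_len = 16384 and world_size = 4 (inferred
+# from val_batch_size = 65536 in the run log at the end of this file):
+#     val_num_steps = 491520 // 65536    # -> 7
+#     491520 - 7 * 65536                 # -> 32768 tokens skipped per eval
+# which is exactly the condition the warning flags.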
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + #num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + fixed_indices=fixed_idx + ) + + # + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA (Unweighted): {eval_results['total_acc_unweighted']:.4f}", console=True) + print0(f" Total FTA (Weighted): {eval_results['total_acc_weighted']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc_unweighted'] # Use simple average method + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", 
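+# --- ILLUSTRATIVE SKETCH (hypothetical, review annotation): restoring the
+# checkpoint dict saved above in a fresh process, assuming the model and
+# optimizers are first reconstructed exactly as in this script:
+#     ckpt = torch.load(checkpoint_path, map_location="cuda")
+#     model_compiled.load_state_dict(ckpt["model"])
+#     for opt, opt_state in zip(optimizers, ckpt["optimizers"]):
+#         opt.load_state_dict(opt_state)
+#     start_step = ckpt["step"] + 1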
console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + # Add gradient clipping for SGD mode to prevent gradient explosion + if exp_args.optimizer_mode == 9: + torch.nn.utils.clip_grad_norm_(model_compiled.parameters(), max_norm=1.0) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() +[2025-09-05 16:50:00] [Rank 0] PRINT: Constructing model... +[2025-09-05 16:50:00] [Rank 0] PRINT: Constructing model... +[2025-09-05 16:50:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-05 16:50:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-09-05 16:50:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-09-05 16:50:01] [Rank 0] PRINT: Model constructed and broadcasted. 
+[2025-09-05 16:50:01] [Rank 0] PRINT: Testing model forward function: 
+[2025-09-05 16:50:06] [Rank 0] PRINT: Model test - Result type: 
+[2025-09-05 16:50:06] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-09-05 16:50:06] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-09-05 16:50:06] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-09-05 16:50:06] [Rank 0] PRINT: Model returns: 
+[2025-09-05 16:50:06] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-09-05 16:50:06] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 9
+[2025-09-05 16:50:06] [Rank 0] PRINT: Mode 9: Using pure SGD+Momentum (lr=0.5).
+[2025-09-05 16:50:06] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-09-05 16:50:06] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-09-05 16:50:11] [Rank 0] PRINT: Model compilation complete.
+[2025-09-05 16:50:11] [Rank 0] PRINT: Starting warmup...
+[2025-09-05 16:50:51] [Rank 0] PRINT: Warmup complete.
+[2025-09-05 16:50:51] [Rank 0] PRINT: Starting training...
+[2025-09-05 16:50:58] [Rank 0] PRINT: Built fixed eval set. Saved to logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/fixed_eval_indices.json
+[2025-09-05 16:50:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:51:02] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-09-05 16:51:34] [Rank 0] step:21/10000 train_time:32574ms step_avg:1551.15ms
+[2025-09-05 16:51:35] [Rank 0] step:41/10000 train_time:33300ms step_avg:812.20ms
+[2025-09-05 16:51:36] [Rank 0] step:61/10000 train_time:34025ms step_avg:557.79ms
+[2025-09-05 16:51:36] [Rank 0] step:81/10000 train_time:34750ms step_avg:429.01ms
+[2025-09-05 16:51:37] [Rank 0] step:101/10000 train_time:35475ms step_avg:351.24ms
+[2025-09-05 16:51:38] [Rank 0] step:121/10000 train_time:36200ms step_avg:299.17ms
+[2025-09-05 16:51:39] [Rank 0] step:141/10000 train_time:36925ms step_avg:261.88ms
+[2025-09-05 16:51:39] [Rank 0] step:161/10000 train_time:37650ms step_avg:233.85ms
+[2025-09-05 16:51:40] [Rank 0] step:181/10000 train_time:38374ms step_avg:212.01ms
+[2025-09-05 16:51:41] [Rank 0] step:201/10000 train_time:39100ms step_avg:194.53ms
+[2025-09-05 16:51:42] [Rank 0] step:221/10000 train_time:39825ms step_avg:180.20ms
+[2025-09-05 16:51:42] [Rank 0] step:241/10000 train_time:40550ms step_avg:168.26ms
+[2025-09-05 16:51:43] [Rank 0] step:261/10000 train_time:41277ms step_avg:158.15ms
+[2025-09-05 16:51:44] [Rank 0] step:281/10000 train_time:42002ms step_avg:149.47ms
+[2025-09-05 16:51:44] [Rank 0] step:301/10000 train_time:42730ms step_avg:141.96ms
+[2025-09-05 16:51:45] [Rank 0] step:321/10000 train_time:43459ms step_avg:135.38ms
+[2025-09-05 16:51:46] [Rank 0] step:341/10000 train_time:44183ms step_avg:129.57ms
+[2025-09-05 16:51:47] [Rank 0] step:361/10000 train_time:44908ms step_avg:124.40ms
+[2025-09-05 16:51:47] [Rank 0] step:381/10000 train_time:45633ms step_avg:119.77ms
+[2025-09-05 16:51:48] [Rank 0] step:401/10000 train_time:46358ms step_avg:115.61ms
+[2025-09-05 16:51:49] [Rank 0] step:421/10000 train_time:47083ms step_avg:111.84ms
+[2025-09-05 16:51:50] [Rank 0] step:441/10000 train_time:47809ms step_avg:108.41ms
+[2025-09-05 16:51:50] [Rank 0] step:461/10000 train_time:48533ms step_avg:105.28ms
+[2025-09-05 16:51:51] [Rank 0] step:481/10000 train_time:49258ms step_avg:102.41ms
+[2025-09-05 16:51:52] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:51:52] [Rank 0] PRINT: step:500/10000 train_loss:3.5124 val_loss:2.2777 train_time:50063ms step_avg:100.13ms
+[2025-09-05 16:51:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:51:52] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:53:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:53:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:53:14] [Rank 0] Total Loss: 4.8595
+[2025-09-05 16:53:14] [Rank 0] Total FTA (Unweighted): 0.2513
+[2025-09-05 16:53:14] [Rank 0] Total FTA (Weighted): 0.2512
+[2025-09-05 16:53:14] [Rank 0] Group 0 Loss: 3.4825
+[2025-09-05 16:53:14] [Rank 0] Group 1 Loss: 3.2407
+[2025-09-05 16:53:14] [Rank 0] Group 2 Loss: 3.3237
+[2025-09-05 16:53:14] [Rank 0] Group 3 Loss: 3.8557
+[2025-09-05 16:53:14] [Rank 0] Group 4 Loss: 4.0639
+[2025-09-05 16:53:14] [Rank 0] Group 5 Loss: 4.5781
+[2025-09-05 16:53:14] [Rank 0] Group 6 Loss: 4.9499
+[2025-09-05 16:53:14] [Rank 0] Group 7 Loss: 5.1071
+[2025-09-05 16:53:14] [Rank 0] Group 8 Loss: 5.4684
+[2025-09-05 16:53:14] [Rank 0] Group 9 Loss: 5.5894
+[2025-09-05 16:53:14] [Rank 0] Group 10 Loss: 5.6850
+[2025-09-05 16:53:14] [Rank 0] Group 11 Loss: 5.7549
+[2025-09-05 16:53:14] [Rank 0] Group 12 Loss: 5.6334
+[2025-09-05 16:53:14] [Rank 0] Group 13 Loss: 5.6682
+[2025-09-05 16:53:14] [Rank 0] Group 14 Loss: 5.7028
+[2025-09-05 16:53:14] [Rank 0] Group 15 Loss: 5.6479
+[2025-09-05 16:53:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:53:14] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:53:14] [Rank 0] Group 2 FTA: 0.3100
+[2025-09-05 16:53:14] [Rank 0] Group 3 FTA: 0.1700
+[2025-09-05 16:53:14] [Rank 0] Group 4 FTA: 0.2200
+[2025-09-05 16:53:14] [Rank 0] Group 5 FTA: 0.2100
+[2025-09-05 16:53:14] [Rank 0] Group 6 FTA: 0.1700
+[2025-09-05 16:53:14] [Rank 0] Group 7 FTA: 0.1100
+[2025-09-05 16:53:14] [Rank 0] Group 8 FTA: 0.1500
+[2025-09-05 16:53:14] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 16:53:14] [Rank 0] Group 10 FTA: 0.0800
+[2025-09-05 16:53:14] [Rank 0] Group 11 FTA: 0.1000
+[2025-09-05 16:53:14] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 16:53:14] [Rank 0] Group 13 FTA: 0.1100
+[2025-09-05 16:53:14] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 16:53:14] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 16:53:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 16:53:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 16:53:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 16:53:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
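The two FTA totals above differ only in how the 16 group scores are aggregated: unweighted is the plain mean of the per-group FTAs, weighted scales each group by its sample count. A sketch using the step-500 numbers just above; the even 100-per-group split is an assumption based on the 1600-sample fixed eval set, and the logged 0.2513 vs 0.2512 is consistent with the exact mean 0.25125 being rounded differently (or with slightly unequal true counts):

    # Per-group FTA values from the step-500 evaluation above.
    group_fta = [1.0000, 1.0000, 0.3100, 0.1700, 0.2200, 0.2100, 0.1700, 0.1100,
                 0.1500, 0.1100, 0.0800, 0.1000, 0.0800, 0.1100, 0.1200, 0.0800]
    group_n = [100] * 16  # assumed equal split of the 1600 fixed-eval samples

    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * n for f, n in zip(group_fta, group_n)) / sum(group_n)
    print(f"{unweighted:.5f} {weighted:.5f}")  # 0.25125 0.25125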
+[2025-09-05 16:53:16] [Rank 0] step:501/10000 train_time:50074ms step_avg:99.95ms
+[2025-09-05 16:53:16] [Rank 0] step:521/10000 train_time:50727ms step_avg:97.36ms
+[2025-09-05 16:53:17] [Rank 0] step:541/10000 train_time:51452ms step_avg:95.11ms
+[2025-09-05 16:53:18] [Rank 0] step:561/10000 train_time:52177ms step_avg:93.01ms
+[2025-09-05 16:53:18] [Rank 0] step:581/10000 train_time:52901ms step_avg:91.05ms
+[2025-09-05 16:53:19] [Rank 0] step:601/10000 train_time:53626ms step_avg:89.23ms
+[2025-09-05 16:53:20] [Rank 0] step:621/10000 train_time:54350ms step_avg:87.52ms
+[2025-09-05 16:53:21] [Rank 0] step:641/10000 train_time:55075ms step_avg:85.92ms
+[2025-09-05 16:53:21] [Rank 0] step:661/10000 train_time:55800ms step_avg:84.42ms
+[2025-09-05 16:53:22] [Rank 0] step:681/10000 train_time:56524ms step_avg:83.00ms
+[2025-09-05 16:53:23] [Rank 0] step:701/10000 train_time:57249ms step_avg:81.67ms
+[2025-09-05 16:53:24] [Rank 0] step:721/10000 train_time:57974ms step_avg:80.41ms
+[2025-09-05 16:53:24] [Rank 0] step:741/10000 train_time:58698ms step_avg:79.21ms
+[2025-09-05 16:53:25] [Rank 0] step:761/10000 train_time:59427ms step_avg:78.09ms
+[2025-09-05 16:53:26] [Rank 0] step:781/10000 train_time:60158ms step_avg:77.03ms
+[2025-09-05 16:53:26] [Rank 0] step:801/10000 train_time:60888ms step_avg:76.01ms
+[2025-09-05 16:53:28] [Rank 0] step:821/10000 train_time:62220ms step_avg:75.79ms
+[2025-09-05 16:53:29] [Rank 0] step:841/10000 train_time:62950ms step_avg:74.85ms
+[2025-09-05 16:53:29] [Rank 0] step:861/10000 train_time:63679ms step_avg:73.96ms
+[2025-09-05 16:53:30] [Rank 0] step:881/10000 train_time:64409ms step_avg:73.11ms
+[2025-09-05 16:53:31] [Rank 0] step:901/10000 train_time:65138ms step_avg:72.30ms
+[2025-09-05 16:53:31] [Rank 0] step:921/10000 train_time:65868ms step_avg:71.52ms
+[2025-09-05 16:53:32] [Rank 0] step:941/10000 train_time:66598ms step_avg:70.77ms
+[2025-09-05 16:53:33] [Rank 0] step:961/10000 train_time:67328ms step_avg:70.06ms
+[2025-09-05 16:53:34] [Rank 0] step:981/10000 train_time:68057ms step_avg:69.38ms
+[2025-09-05 16:53:34] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:53:35] [Rank 0] PRINT: step:1000/10000 train_loss:2.0092 val_loss:1.8019 train_time:68867ms step_avg:68.87ms
+[2025-09-05 16:53:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:53:35] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:54:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:54:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:54:56] [Rank 0] Total Loss: 4.4556
+[2025-09-05 16:54:56] [Rank 0] Total FTA (Unweighted): 0.3513
+[2025-09-05 16:54:56] [Rank 0] Total FTA (Weighted): 0.3513
+[2025-09-05 16:54:56] [Rank 0] Group 0 Loss: 3.4012
+[2025-09-05 16:54:56] [Rank 0] Group 1 Loss: 3.2409
+[2025-09-05 16:54:56] [Rank 0] Group 2 Loss: 3.1514
+[2025-09-05 16:54:56] [Rank 0] Group 3 Loss: 3.6531
+[2025-09-05 16:54:56] [Rank 0] Group 4 Loss: 3.7313
+[2025-09-05 16:54:56] [Rank 0] Group 5 Loss: 4.0375
+[2025-09-05 16:54:56] [Rank 0] Group 6 Loss: 4.3610
+[2025-09-05 16:54:56] [Rank 0] Group 7 Loss: 4.5663
+[2025-09-05 16:54:56] [Rank 0] Group 8 Loss: 4.8924
+[2025-09-05 16:54:56] [Rank 0] Group 9 Loss: 5.0470
+[2025-09-05 16:54:56] [Rank 0] Group 10 Loss: 5.1704
+[2025-09-05 16:54:56] [Rank 0] Group 11 Loss: 5.1826
+[2025-09-05 16:54:56] [Rank 0] Group 12 Loss: 5.1754
+[2025-09-05 16:54:56] [Rank 0] Group 13 Loss: 5.2438
+[2025-09-05 16:54:56] [Rank 0] Group 14 Loss: 5.2415
+[2025-09-05 16:54:56] [Rank 0] Group 15 Loss: 5.1942
+[2025-09-05 16:54:56] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:54:56] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:54:56] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:54:56] [Rank 0] Group 3 FTA: 0.4500
+[2025-09-05 16:54:56] [Rank 0] Group 4 FTA: 0.3800
+[2025-09-05 16:54:56] [Rank 0] Group 5 FTA: 0.3400
+[2025-09-05 16:54:56] [Rank 0] Group 6 FTA: 0.3500
+[2025-09-05 16:54:56] [Rank 0] Group 7 FTA: 0.1700
+[2025-09-05 16:54:56] [Rank 0] Group 8 FTA: 0.2400
+[2025-09-05 16:54:56] [Rank 0] Group 9 FTA: 0.1100
+[2025-09-05 16:54:56] [Rank 0] Group 10 FTA: 0.0900
+[2025-09-05 16:54:56] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 16:54:56] [Rank 0] Group 12 FTA: 0.0800
+[2025-09-05 16:54:56] [Rank 0] Group 13 FTA: 0.0900
+[2025-09-05 16:54:56] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 16:54:56] [Rank 0] Group 15 FTA: 0.0500
+[2025-09-05 16:54:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 16:54:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 16:54:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 16:54:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
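Throughout these step lines the learning rate is being rescaled by the schedule in the training loop above: group["lr"] = group["initial_lr"] * get_lr(step). get_lr itself is not part of this fragment; a hypothetical shape consistent with a warmup-free, constant-then-linear-cooldown schedule is sketched below (num_iterations matches the step:N/10000 lines; the 0.8 cooldown fraction is an assumed value for illustration only):

    num_iterations = 10000
    cooldown_frac = 0.8  # assumption, for illustration only

    def get_lr(step: int) -> float:
        # Flat at 1.0, then linear decay to 0.0 over the final cooldown window.
        x = step / num_iterations
        if x < 1 - cooldown_frac:
            return 1.0
        return (1 - x) / cooldown_frac

    print(get_lr(0), get_lr(2000), get_lr(6000), get_lr(10000))  # 1.0 1.0 0.5 0.0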
+[2025-09-05 16:54:58] [Rank 0] step:1001/10000 train_time:68878ms step_avg:68.81ms
+[2025-09-05 16:54:59] [Rank 0] step:1021/10000 train_time:69542ms step_avg:68.11ms
+[2025-09-05 16:54:59] [Rank 0] step:1041/10000 train_time:70271ms step_avg:67.50ms
+[2025-09-05 16:55:00] [Rank 0] step:1061/10000 train_time:71000ms step_avg:66.92ms
+[2025-09-05 16:55:01] [Rank 0] step:1081/10000 train_time:71730ms step_avg:66.36ms
+[2025-09-05 16:55:01] [Rank 0] step:1101/10000 train_time:72460ms step_avg:65.81ms
+[2025-09-05 16:55:02] [Rank 0] step:1121/10000 train_time:73190ms step_avg:65.29ms
+[2025-09-05 16:55:03] [Rank 0] step:1141/10000 train_time:73921ms step_avg:64.79ms
+[2025-09-05 16:55:04] [Rank 0] step:1161/10000 train_time:74650ms step_avg:64.30ms
+[2025-09-05 16:55:04] [Rank 0] step:1181/10000 train_time:75380ms step_avg:63.83ms
+[2025-09-05 16:55:05] [Rank 0] step:1201/10000 train_time:76110ms step_avg:63.37ms
+[2025-09-05 16:55:06] [Rank 0] step:1221/10000 train_time:76840ms step_avg:62.93ms
+[2025-09-05 16:55:07] [Rank 0] step:1241/10000 train_time:77569ms step_avg:62.51ms
+[2025-09-05 16:55:07] [Rank 0] step:1261/10000 train_time:78299ms step_avg:62.09ms
+[2025-09-05 16:55:08] [Rank 0] step:1281/10000 train_time:79139ms step_avg:61.78ms
+[2025-09-05 16:55:09] [Rank 0] step:1301/10000 train_time:79868ms step_avg:61.39ms
+[2025-09-05 16:55:10] [Rank 0] step:1321/10000 train_time:80597ms step_avg:61.01ms
+[2025-09-05 16:55:10] [Rank 0] step:1341/10000 train_time:81473ms step_avg:60.76ms
+[2025-09-05 16:55:11] [Rank 0] step:1361/10000 train_time:82203ms step_avg:60.40ms
+[2025-09-05 16:55:12] [Rank 0] step:1381/10000 train_time:82932ms step_avg:60.05ms
+[2025-09-05 16:55:13] [Rank 0] step:1401/10000 train_time:83662ms step_avg:59.72ms
+[2025-09-05 16:55:13] [Rank 0] step:1421/10000 train_time:84392ms step_avg:59.39ms
+[2025-09-05 16:55:14] [Rank 0] step:1441/10000 train_time:85121ms step_avg:59.07ms
+[2025-09-05 16:55:15] [Rank 0] step:1461/10000 train_time:85851ms step_avg:58.76ms
+[2025-09-05 16:55:16] [Rank 0] step:1481/10000 train_time:86580ms step_avg:58.46ms
+[2025-09-05 16:55:16] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:55:17] [Rank 0] PRINT: step:1500/10000 train_loss:1.7082 val_loss:1.6286 train_time:87390ms step_avg:58.26ms
+[2025-09-05 16:55:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:55:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:56:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:56:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:56:38] [Rank 0] Total Loss: 4.3802
+[2025-09-05 16:56:38] [Rank 0] Total FTA (Unweighted): 0.4075
+[2025-09-05 16:56:38] [Rank 0] Total FTA (Weighted): 0.4075
+[2025-09-05 16:56:38] [Rank 0] Group 0 Loss: 3.4432
+[2025-09-05 16:56:38] [Rank 0] Group 1 Loss: 3.2438
+[2025-09-05 16:56:38] [Rank 0] Group 2 Loss: 3.1815
+[2025-09-05 16:56:38] [Rank 0] Group 3 Loss: 3.6575
+[2025-09-05 16:56:38] [Rank 0] Group 4 Loss: 3.7610
+[2025-09-05 16:56:38] [Rank 0] Group 5 Loss: 3.9984
+[2025-09-05 16:56:38] [Rank 0] Group 6 Loss: 4.2068
+[2025-09-05 16:56:38] [Rank 0] Group 7 Loss: 4.4191
+[2025-09-05 16:56:38] [Rank 0] Group 8 Loss: 4.7589
+[2025-09-05 16:56:38] [Rank 0] Group 9 Loss: 4.8919
+[2025-09-05 16:56:38] [Rank 0] Group 10 Loss: 5.0706
+[2025-09-05 16:56:38] [Rank 0] Group 11 Loss: 5.0754
+[2025-09-05 16:56:38] [Rank 0] Group 12 Loss: 5.0351
+[2025-09-05 16:56:38] [Rank 0] Group 13 Loss: 5.1212
+[2025-09-05 16:56:38] [Rank 0] Group 14 Loss: 5.1382
+[2025-09-05 16:56:38] [Rank 0] Group 15 Loss: 5.0803
+[2025-09-05 16:56:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:56:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:56:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:56:38] [Rank 0] Group 3 FTA: 0.7300
+[2025-09-05 16:56:38] [Rank 0] Group 4 FTA: 0.4500
+[2025-09-05 16:56:38] [Rank 0] Group 5 FTA: 0.4700
+[2025-09-05 16:56:38] [Rank 0] Group 6 FTA: 0.3800
+[2025-09-05 16:56:38] [Rank 0] Group 7 FTA: 0.2600
+[2025-09-05 16:56:38] [Rank 0] Group 8 FTA: 0.2900
+[2025-09-05 16:56:38] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 16:56:38] [Rank 0] Group 10 FTA: 0.1300
+[2025-09-05 16:56:38] [Rank 0] Group 11 FTA: 0.1100
+[2025-09-05 16:56:38] [Rank 0] Group 12 FTA: 0.1100
+[2025-09-05 16:56:38] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 16:56:38] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 16:56:38] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:56:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 16:56:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 16:56:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 16:56:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 16:56:39] [Rank 0] step:1501/10000 train_time:87401ms step_avg:58.23ms
+[2025-09-05 16:56:40] [Rank 0] step:1521/10000 train_time:88064ms step_avg:57.90ms
+[2025-09-05 16:56:41] [Rank 0] step:1541/10000 train_time:88794ms step_avg:57.62ms
+[2025-09-05 16:56:42] [Rank 0] step:1561/10000 train_time:89523ms step_avg:57.35ms
+[2025-09-05 16:56:42] [Rank 0] step:1581/10000 train_time:90253ms step_avg:57.09ms
+[2025-09-05 16:56:43] [Rank 0] step:1601/10000 train_time:90983ms step_avg:56.83ms
+[2025-09-05 16:56:44] [Rank 0] step:1621/10000 train_time:91713ms step_avg:56.58ms
+[2025-09-05 16:56:45] [Rank 0] step:1641/10000 train_time:93072ms step_avg:56.72ms
+[2025-09-05 16:56:46] [Rank 0] step:1661/10000 train_time:93802ms step_avg:56.47ms
+[2025-09-05 16:56:47] [Rank 0] step:1681/10000 train_time:94532ms step_avg:56.24ms
+[2025-09-05 16:56:47] [Rank 0] step:1701/10000 train_time:95262ms step_avg:56.00ms
+[2025-09-05 16:56:48] [Rank 0] step:1721/10000 train_time:95992ms step_avg:55.78ms
+[2025-09-05 16:56:49] [Rank 0] step:1741/10000 train_time:96722ms step_avg:55.56ms
+[2025-09-05 16:56:50] [Rank 0] step:1761/10000 train_time:97452ms step_avg:55.34ms
+[2025-09-05 16:56:50] [Rank 0] step:1781/10000 train_time:98181ms step_avg:55.13ms
+[2025-09-05 16:56:51] [Rank 0] step:1801/10000 train_time:98911ms step_avg:54.92ms
+[2025-09-05 16:56:52] [Rank 0] step:1821/10000 train_time:99641ms step_avg:54.72ms
+[2025-09-05 16:56:52] [Rank 0] step:1841/10000 train_time:100371ms step_avg:54.52ms
+[2025-09-05 16:56:53] [Rank 0] step:1861/10000 train_time:101101ms step_avg:54.33ms
+[2025-09-05 16:56:54] [Rank 0] step:1881/10000 train_time:101832ms step_avg:54.14ms
+[2025-09-05 16:56:55] [Rank 0] step:1901/10000 train_time:102562ms step_avg:53.95ms
+[2025-09-05 16:56:55] [Rank 0] step:1921/10000 train_time:103292ms step_avg:53.77ms
+[2025-09-05 16:56:56] [Rank 0] step:1941/10000 train_time:104022ms step_avg:53.59ms
+[2025-09-05 16:56:57] [Rank 0] step:1961/10000 train_time:104752ms step_avg:53.42ms
+[2025-09-05 16:56:58] [Rank 0] step:1981/10000 train_time:105482ms step_avg:53.25ms
+[2025-09-05 16:56:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 16:56:59] [Rank 0] PRINT: step:2000/10000 train_loss:1.5901 val_loss:1.5470 train_time:106291ms step_avg:53.15ms
+[2025-09-05 16:56:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 16:56:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 16:58:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 16:58:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 16:58:24] [Rank 0] Total Loss: 4.2258
+[2025-09-05 16:58:24] [Rank 0] Total FTA (Unweighted): 0.4487
+[2025-09-05 16:58:24] [Rank 0] Total FTA (Weighted): 0.4487
+[2025-09-05 16:58:24] [Rank 0] Group 0 Loss: 3.3992
+[2025-09-05 16:58:24] [Rank 0] Group 1 Loss: 3.0509
+[2025-09-05 16:58:24] [Rank 0] Group 2 Loss: 3.1186
+[2025-09-05 16:58:24] [Rank 0] Group 3 Loss: 3.5641
+[2025-09-05 16:58:24] [Rank 0] Group 4 Loss: 3.6701
+[2025-09-05 16:58:24] [Rank 0] Group 5 Loss: 3.8863
+[2025-09-05 16:58:24] [Rank 0] Group 6 Loss: 3.9937
+[2025-09-05 16:58:24] [Rank 0] Group 7 Loss: 4.2176
+[2025-09-05 16:58:24] [Rank 0] Group 8 Loss: 4.5723
+[2025-09-05 16:58:24] [Rank 0] Group 9 Loss: 4.6998
+[2025-09-05 16:58:24] [Rank 0] Group 10 Loss: 4.8522
+[2025-09-05 16:58:24] [Rank 0] Group 11 Loss: 4.8756
+[2025-09-05 16:58:24] [Rank 0] Group 12 Loss: 4.8444
+[2025-09-05 16:58:24] [Rank 0] Group 13 Loss: 4.9383
+[2025-09-05 16:58:24] [Rank 0] Group 14 Loss: 4.9701
+[2025-09-05 16:58:24] [Rank 0] Group 15 Loss: 4.9602
+[2025-09-05 16:58:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 16:58:24] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 16:58:24] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 16:58:24] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 16:58:24] [Rank 0] Group 4 FTA: 0.5400
+[2025-09-05 16:58:24] [Rank 0] Group 5 FTA: 0.5100
+[2025-09-05 16:58:24] [Rank 0] Group 6 FTA: 0.4000
+[2025-09-05 16:58:24] [Rank 0] Group 7 FTA: 0.3900
+[2025-09-05 16:58:24] [Rank 0] Group 8 FTA: 0.3400
+[2025-09-05 16:58:24] [Rank 0] Group 9 FTA: 0.2400
+[2025-09-05 16:58:24] [Rank 0] Group 10 FTA: 0.1900
+[2025-09-05 16:58:24] [Rank 0] Group 11 FTA: 0.1300
+[2025-09-05 16:58:24] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 16:58:24] [Rank 0] Group 13 FTA: 0.1200
+[2025-09-05 16:58:24] [Rank 0] Group 14 FTA: 0.1000
+[2025-09-05 16:58:24] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 16:58:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 16:58:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 16:58:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 16:58:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
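The step_avg column is cumulative train_time divided by the number of steps taken, matching the print in the training loop above. A spot check against the step:2000 validation line just above:

    train_time_ms = 106291  # from the step:2000/10000 line
    steps = 2000
    print(f"{train_time_ms / steps:.2f}ms")  # 53.15ms, as logged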
train_time:109163ms step_avg:52.46ms +[2025-09-05 16:58:29] [Rank 0] step:2081/10000 train_time:109163ms step_avg:52.46ms +[2025-09-05 16:58:29] [Rank 0] step:2101/10000 train_time:109894ms step_avg:52.31ms +[2025-09-05 16:58:29] [Rank 0] step:2101/10000 train_time:109894ms step_avg:52.31ms +[2025-09-05 16:58:30] [Rank 0] step:2121/10000 train_time:110624ms step_avg:52.16ms +[2025-09-05 16:58:30] [Rank 0] step:2121/10000 train_time:110624ms step_avg:52.16ms +[2025-09-05 16:58:31] [Rank 0] step:2141/10000 train_time:111354ms step_avg:52.01ms +[2025-09-05 16:58:31] [Rank 0] step:2141/10000 train_time:111354ms step_avg:52.01ms +[2025-09-05 16:58:31] [Rank 0] step:2161/10000 train_time:112084ms step_avg:51.87ms +[2025-09-05 16:58:31] [Rank 0] step:2161/10000 train_time:112084ms step_avg:51.87ms +[2025-09-05 16:58:32] [Rank 0] step:2181/10000 train_time:112815ms step_avg:51.73ms +[2025-09-05 16:58:32] [Rank 0] step:2181/10000 train_time:112815ms step_avg:51.73ms +[2025-09-05 16:58:33] [Rank 0] step:2201/10000 train_time:113545ms step_avg:51.59ms +[2025-09-05 16:58:33] [Rank 0] step:2201/10000 train_time:113545ms step_avg:51.59ms +[2025-09-05 16:58:34] [Rank 0] step:2221/10000 train_time:114275ms step_avg:51.45ms +[2025-09-05 16:58:34] [Rank 0] step:2221/10000 train_time:114275ms step_avg:51.45ms +[2025-09-05 16:58:34] [Rank 0] step:2241/10000 train_time:115009ms step_avg:51.32ms +[2025-09-05 16:58:34] [Rank 0] step:2241/10000 train_time:115009ms step_avg:51.32ms +[2025-09-05 16:58:35] [Rank 0] step:2261/10000 train_time:115745ms step_avg:51.19ms +[2025-09-05 16:58:35] [Rank 0] step:2261/10000 train_time:115745ms step_avg:51.19ms +[2025-09-05 16:58:36] [Rank 0] step:2281/10000 train_time:116481ms step_avg:51.07ms +[2025-09-05 16:58:36] [Rank 0] step:2281/10000 train_time:116481ms step_avg:51.07ms +[2025-09-05 16:58:37] [Rank 0] step:2301/10000 train_time:117218ms step_avg:50.94ms +[2025-09-05 16:58:37] [Rank 0] step:2301/10000 train_time:117218ms step_avg:50.94ms +[2025-09-05 16:58:37] [Rank 0] step:2321/10000 train_time:117953ms step_avg:50.82ms +[2025-09-05 16:58:37] [Rank 0] step:2321/10000 train_time:117953ms step_avg:50.82ms +[2025-09-05 16:58:38] [Rank 0] step:2341/10000 train_time:118689ms step_avg:50.70ms +[2025-09-05 16:58:38] [Rank 0] step:2341/10000 train_time:118689ms step_avg:50.70ms +[2025-09-05 16:58:39] [Rank 0] step:2361/10000 train_time:119425ms step_avg:50.58ms +[2025-09-05 16:58:39] [Rank 0] step:2361/10000 train_time:119425ms step_avg:50.58ms +[2025-09-05 16:58:40] [Rank 0] step:2381/10000 train_time:120161ms step_avg:50.47ms +[2025-09-05 16:58:40] [Rank 0] step:2381/10000 train_time:120161ms step_avg:50.47ms +[2025-09-05 16:58:40] [Rank 0] step:2401/10000 train_time:120898ms step_avg:50.35ms +[2025-09-05 16:58:40] [Rank 0] step:2401/10000 train_time:120898ms step_avg:50.35ms +[2025-09-05 16:58:41] [Rank 0] step:2421/10000 train_time:121633ms step_avg:50.24ms +[2025-09-05 16:58:41] [Rank 0] step:2421/10000 train_time:121633ms step_avg:50.24ms +[2025-09-05 16:58:42] [Rank 0] step:2441/10000 train_time:122370ms step_avg:50.13ms +[2025-09-05 16:58:42] [Rank 0] step:2441/10000 train_time:122370ms step_avg:50.13ms +[2025-09-05 16:58:42] [Rank 0] step:2461/10000 train_time:123106ms step_avg:50.02ms +[2025-09-05 16:58:42] [Rank 0] step:2461/10000 train_time:123106ms step_avg:50.02ms +[2025-09-05 16:58:43] [Rank 0] step:2481/10000 train_time:123843ms step_avg:49.92ms +[2025-09-05 16:58:43] [Rank 0] step:2481/10000 train_time:123843ms step_avg:49.92ms +[2025-09-05 16:58:44] [Rank 0] 
PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 16:58:44] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. +[2025-09-05 16:58:45] [Rank 0] PRINT: step:2500/10000 train_loss:1.5273 val_loss:1.4889 train_time:124660ms step_avg:49.86ms +[2025-09-05 16:58:45] [Rank 0] PRINT: step:2500/10000 train_loss:1.5273 val_loss:1.4889 train_time:124660ms step_avg:49.86ms +[2025-09-05 16:58:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:58:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 16:58:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 16:58:45] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:00:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:00:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:00:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:00:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:00:09] [Rank 0] Total Loss: 4.2474 +[2025-09-05 17:00:09] [Rank 0] Total Loss: 4.2474 +[2025-09-05 17:00:09] [Rank 0] Total FTA (Unweighted): 0.4619 +[2025-09-05 17:00:09] [Rank 0] Total FTA (Unweighted): 0.4619 +[2025-09-05 17:00:09] [Rank 0] Total FTA (Weighted): 0.4619 +[2025-09-05 17:00:09] [Rank 0] Total FTA (Weighted): 0.4619 +[2025-09-05 17:00:09] [Rank 0] Group 0 Loss: 3.4381 +[2025-09-05 17:00:09] [Rank 0] Group 0 Loss: 3.4381 +[2025-09-05 17:00:09] [Rank 0] Group 1 Loss: 3.2789 +[2025-09-05 17:00:09] [Rank 0] Group 1 Loss: 3.2789 +[2025-09-05 17:00:09] [Rank 0] Group 2 Loss: 3.1892 +[2025-09-05 17:00:09] [Rank 0] Group 2 Loss: 3.1892 +[2025-09-05 17:00:09] [Rank 0] Group 3 Loss: 3.6377 +[2025-09-05 17:00:09] [Rank 0] Group 3 Loss: 3.6377 +[2025-09-05 17:00:09] [Rank 0] Group 4 Loss: 3.7043 +[2025-09-05 17:00:09] [Rank 0] Group 4 Loss: 3.7043 +[2025-09-05 17:00:09] [Rank 0] Group 5 Loss: 3.8650 +[2025-09-05 17:00:09] [Rank 0] Group 5 Loss: 3.8650 +[2025-09-05 17:00:09] [Rank 0] Group 6 Loss: 4.0084 +[2025-09-05 17:00:09] [Rank 0] Group 6 Loss: 4.0084 +[2025-09-05 17:00:09] [Rank 0] Group 7 Loss: 4.2508 +[2025-09-05 17:00:09] [Rank 0] Group 7 Loss: 4.2508 +[2025-09-05 17:00:09] [Rank 0] Group 8 Loss: 4.5495 +[2025-09-05 17:00:09] [Rank 0] Group 8 Loss: 4.5495 +[2025-09-05 17:00:09] [Rank 0] Group 9 Loss: 4.6608 +[2025-09-05 17:00:09] [Rank 0] Group 9 Loss: 4.6608 +[2025-09-05 17:00:09] [Rank 0] Group 10 Loss: 4.8832 +[2025-09-05 17:00:09] [Rank 0] Group 10 Loss: 4.8832 +[2025-09-05 17:00:09] [Rank 0] Group 11 Loss: 4.8836 +[2025-09-05 17:00:09] [Rank 0] Group 11 Loss: 4.8836 +[2025-09-05 17:00:09] [Rank 0] Group 12 Loss: 4.8352 +[2025-09-05 17:00:09] [Rank 0] Group 12 Loss: 4.8352 +[2025-09-05 17:00:09] [Rank 0] Group 13 Loss: 4.9000 +[2025-09-05 17:00:09] [Rank 0] Group 13 Loss: 4.9000 +[2025-09-05 17:00:09] [Rank 0] Group 14 Loss: 4.9412 +[2025-09-05 17:00:09] [Rank 0] Group 14 Loss: 4.9412 +[2025-09-05 17:00:09] [Rank 0] Group 15 Loss: 4.9326 +[2025-09-05 17:00:09] [Rank 0] Group 15 Loss: 4.9326 +[2025-09-05 17:00:09] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:00:09] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:00:09] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:00:09] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:00:09] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:00:09] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:00:09] [Rank 0] Group 3 FTA: 
1.0000 +[2025-09-05 17:00:09] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:00:09] [Rank 0] Group 4 FTA: 0.5600 +[2025-09-05 17:00:09] [Rank 0] Group 4 FTA: 0.5600 +[2025-09-05 17:00:09] [Rank 0] Group 5 FTA: 0.5500 +[2025-09-05 17:00:09] [Rank 0] Group 5 FTA: 0.5500 +[2025-09-05 17:00:09] [Rank 0] Group 6 FTA: 0.4300 +[2025-09-05 17:00:09] [Rank 0] Group 6 FTA: 0.4300 +[2025-09-05 17:00:09] [Rank 0] Group 7 FTA: 0.4100 +[2025-09-05 17:00:09] [Rank 0] Group 7 FTA: 0.4100 +[2025-09-05 17:00:09] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 17:00:09] [Rank 0] Group 8 FTA: 0.3600 +[2025-09-05 17:00:09] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 17:00:09] [Rank 0] Group 9 FTA: 0.2700 +[2025-09-05 17:00:09] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 17:00:09] [Rank 0] Group 10 FTA: 0.2300 +[2025-09-05 17:00:09] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 17:00:09] [Rank 0] Group 11 FTA: 0.1400 +[2025-09-05 17:00:09] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 17:00:09] [Rank 0] Group 12 FTA: 0.1000 +[2025-09-05 17:00:09] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 17:00:09] [Rank 0] Group 13 FTA: 0.1400 +[2025-09-05 17:00:09] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 17:00:09] [Rank 0] Group 14 FTA: 0.1200 +[2025-09-05 17:00:09] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:00:09] [Rank 0] Group 15 FTA: 0.0800 +[2025-09-05 17:00:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png +[2025-09-05 17:00:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png +[2025-09-05 17:00:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png +[2025-09-05 17:00:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png +[2025-09-05 17:00:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png +[2025-09-05 17:00:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png +[2025-09-05 17:00:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png +[2025-09-05 17:00:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png +[2025-09-05 17:00:11] [Rank 0] step:2501/10000 train_time:124671ms step_avg:49.85ms +[2025-09-05 17:00:11] [Rank 0] step:2501/10000 train_time:124671ms step_avg:49.85ms +[2025-09-05 17:00:12] [Rank 0] step:2521/10000 train_time:125334ms step_avg:49.72ms +[2025-09-05 17:00:12] [Rank 0] step:2521/10000 train_time:125334ms step_avg:49.72ms +[2025-09-05 17:00:12] [Rank 0] step:2541/10000 train_time:126070ms step_avg:49.61ms +[2025-09-05 17:00:12] [Rank 0] step:2541/10000 train_time:126070ms step_avg:49.61ms +[2025-09-05 17:00:13] [Rank 0] step:2561/10000 train_time:126805ms step_avg:49.51ms +[2025-09-05 17:00:13] [Rank 0] step:2561/10000 train_time:126805ms step_avg:49.51ms +[2025-09-05 17:00:14] [Rank 0] step:2581/10000 train_time:127541ms step_avg:49.42ms +[2025-09-05 17:00:14] [Rank 0] step:2581/10000 train_time:127541ms step_avg:49.42ms +[2025-09-05 
17:00:15] [Rank 0] step:2601/10000 train_time:128277ms step_avg:49.32ms +[2025-09-05 17:00:15] [Rank 0] step:2601/10000 train_time:128277ms step_avg:49.32ms +[2025-09-05 17:00:15] [Rank 0] step:2621/10000 train_time:129013ms step_avg:49.22ms +[2025-09-05 17:00:15] [Rank 0] step:2621/10000 train_time:129013ms step_avg:49.22ms +[2025-09-05 17:00:16] [Rank 0] step:2641/10000 train_time:129749ms step_avg:49.13ms +[2025-09-05 17:00:16] [Rank 0] step:2641/10000 train_time:129749ms step_avg:49.13ms +[2025-09-05 17:00:17] [Rank 0] step:2661/10000 train_time:130486ms step_avg:49.04ms +[2025-09-05 17:00:17] [Rank 0] step:2661/10000 train_time:130486ms step_avg:49.04ms +[2025-09-05 17:00:17] [Rank 0] step:2681/10000 train_time:131222ms step_avg:48.95ms +[2025-09-05 17:00:17] [Rank 0] step:2681/10000 train_time:131222ms step_avg:48.95ms +[2025-09-05 17:00:18] [Rank 0] step:2701/10000 train_time:131958ms step_avg:48.86ms +[2025-09-05 17:00:18] [Rank 0] step:2701/10000 train_time:131958ms step_avg:48.86ms +[2025-09-05 17:00:19] [Rank 0] step:2721/10000 train_time:132695ms step_avg:48.77ms +[2025-09-05 17:00:19] [Rank 0] step:2721/10000 train_time:132695ms step_avg:48.77ms +[2025-09-05 17:00:20] [Rank 0] step:2741/10000 train_time:133431ms step_avg:48.68ms +[2025-09-05 17:00:20] [Rank 0] step:2741/10000 train_time:133431ms step_avg:48.68ms +[2025-09-05 17:00:20] [Rank 0] step:2761/10000 train_time:134167ms step_avg:48.59ms +[2025-09-05 17:00:20] [Rank 0] step:2761/10000 train_time:134167ms step_avg:48.59ms +[2025-09-05 17:00:21] [Rank 0] step:2781/10000 train_time:134903ms step_avg:48.51ms +[2025-09-05 17:00:21] [Rank 0] step:2781/10000 train_time:134903ms step_avg:48.51ms +[2025-09-05 17:00:22] [Rank 0] step:2801/10000 train_time:135638ms step_avg:48.42ms +[2025-09-05 17:00:22] [Rank 0] step:2801/10000 train_time:135638ms step_avg:48.42ms +[2025-09-05 17:00:23] [Rank 0] step:2821/10000 train_time:136976ms step_avg:48.56ms +[2025-09-05 17:00:23] [Rank 0] step:2821/10000 train_time:136976ms step_avg:48.56ms +[2025-09-05 17:00:24] [Rank 0] step:2841/10000 train_time:137713ms step_avg:48.47ms +[2025-09-05 17:00:24] [Rank 0] step:2841/10000 train_time:137713ms step_avg:48.47ms +[2025-09-05 17:00:25] [Rank 0] step:2861/10000 train_time:138585ms step_avg:48.44ms +[2025-09-05 17:00:25] [Rank 0] step:2861/10000 train_time:138585ms step_avg:48.44ms +[2025-09-05 17:00:26] [Rank 0] step:2881/10000 train_time:139321ms step_avg:48.36ms +[2025-09-05 17:00:26] [Rank 0] step:2881/10000 train_time:139321ms step_avg:48.36ms +[2025-09-05 17:00:26] [Rank 0] step:2901/10000 train_time:140058ms step_avg:48.28ms +[2025-09-05 17:00:26] [Rank 0] step:2901/10000 train_time:140058ms step_avg:48.28ms +[2025-09-05 17:00:27] [Rank 0] step:2921/10000 train_time:140793ms step_avg:48.20ms +[2025-09-05 17:00:27] [Rank 0] step:2921/10000 train_time:140793ms step_avg:48.20ms +[2025-09-05 17:00:28] [Rank 0] step:2941/10000 train_time:141738ms step_avg:48.19ms +[2025-09-05 17:00:28] [Rank 0] step:2941/10000 train_time:141738ms step_avg:48.19ms +[2025-09-05 17:00:29] [Rank 0] step:2961/10000 train_time:142474ms step_avg:48.12ms +[2025-09-05 17:00:29] [Rank 0] step:2961/10000 train_time:142474ms step_avg:48.12ms +[2025-09-05 17:00:29] [Rank 0] step:2981/10000 train_time:143210ms step_avg:48.04ms +[2025-09-05 17:00:29] [Rank 0] step:2981/10000 train_time:143210ms step_avg:48.04ms +[2025-09-05 17:00:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed. 
+[2025-09-05 17:00:31] [Rank 0] PRINT: step:3000/10000 train_loss:1.4800 val_loss:1.4549 train_time:144026ms step_avg:48.01ms
+[2025-09-05 17:00:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:00:31] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:01:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:01:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:01:52] [Rank 0] Total Loss: 4.2320
+[2025-09-05 17:01:52] [Rank 0] Total FTA (Unweighted): 0.4781
+[2025-09-05 17:01:52] [Rank 0] Total FTA (Weighted): 0.4781
+[2025-09-05 17:01:52] [Rank 0] Group 0 Loss: 3.4452
+[2025-09-05 17:01:52] [Rank 0] Group 1 Loss: 3.2548
+[2025-09-05 17:01:52] [Rank 0] Group 2 Loss: 3.1283
+[2025-09-05 17:01:52] [Rank 0] Group 3 Loss: 3.6264
+[2025-09-05 17:01:52] [Rank 0] Group 4 Loss: 3.6962
+[2025-09-05 17:01:52] [Rank 0] Group 5 Loss: 3.8821
+[2025-09-05 17:01:52] [Rank 0] Group 6 Loss: 4.0119
+[2025-09-05 17:01:52] [Rank 0] Group 7 Loss: 4.2425
+[2025-09-05 17:01:52] [Rank 0] Group 8 Loss: 4.5352
+[2025-09-05 17:01:52] [Rank 0] Group 9 Loss: 4.6649
+[2025-09-05 17:01:52] [Rank 0] Group 10 Loss: 4.8583
+[2025-09-05 17:01:52] [Rank 0] Group 11 Loss: 4.8623
+[2025-09-05 17:01:52] [Rank 0] Group 12 Loss: 4.7955
+[2025-09-05 17:01:52] [Rank 0] Group 13 Loss: 4.8637
+[2025-09-05 17:01:52] [Rank 0] Group 14 Loss: 4.9107
+[2025-09-05 17:01:52] [Rank 0] Group 15 Loss: 4.9342
+[2025-09-05 17:01:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:01:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:01:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:01:52] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:01:52] [Rank 0] Group 4 FTA: 0.6000
+[2025-09-05 17:01:52] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:01:52] [Rank 0] Group 6 FTA: 0.4400
+[2025-09-05 17:01:52] [Rank 0] Group 7 FTA: 0.4000
+[2025-09-05 17:01:52] [Rank 0] Group 8 FTA: 0.3800
+[2025-09-05 17:01:52] [Rank 0] Group 9 FTA: 0.2800
+[2025-09-05 17:01:52] [Rank 0] Group 10 FTA: 0.2900
+[2025-09-05 17:01:52] [Rank 0] Group 11 FTA: 0.1900
+[2025-09-05 17:01:52] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 17:01:52] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 17:01:52] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 17:01:52] [Rank 0] Group 15 FTA: 0.0600
+[2025-09-05 17:01:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:01:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:01:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:01:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:01:53] [Rank 0] step:3001/10000 train_time:144036ms step_avg:48.00ms
+[2025-09-05 17:01:54] [Rank 0] step:3021/10000 train_time:144713ms step_avg:47.90ms
+[2025-09-05 17:01:55] [Rank 0] step:3041/10000 train_time:145449ms step_avg:47.83ms
+[2025-09-05 17:01:55] [Rank 0] step:3061/10000 train_time:146185ms step_avg:47.76ms
+[2025-09-05 17:01:56] [Rank 0] step:3081/10000 train_time:146922ms step_avg:47.69ms
+[2025-09-05 17:01:57] [Rank 0] step:3101/10000 train_time:147658ms step_avg:47.62ms
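In every detailed-evaluation block in this log, the unweighted and weighted Total FTA agree (0.4781 here). That is what the two aggregations produce when the 1600 fixed-eval samples are split evenly across the 16 groups, an assumption consistent with this log but not stated in it. A sketch of both means:

# Illustrative aggregation only; the eval script's actual code is not in this log.
def total_fta(group_fta, group_counts):
    unweighted = sum(group_fta) / len(group_fta)                     # mean over groups
    weighted = (sum(f * n for f, n in zip(group_fta, group_counts))
                / sum(group_counts))                                 # mean over samples
    return unweighted, weighted

# Per-group FTA from the step-3000 results above; with 100 samples per
# group the two means coincide, matching the reported 0.4781.
fta = [1.00, 1.00, 1.00, 1.00, 0.60, 0.57, 0.44, 0.40,
       0.38, 0.28, 0.29, 0.19, 0.13, 0.15, 0.16, 0.06]
u, w = total_fta(fta, [100] * 16)
assert abs(u - 0.4781) < 1e-3 and abs(u - w) < 1e-12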
+[2025-09-05 17:01:57] [Rank 0] step:3121/10000 train_time:148394ms step_avg:47.55ms
+[2025-09-05 17:01:58] [Rank 0] step:3141/10000 train_time:149131ms step_avg:47.48ms
+[2025-09-05 17:01:59] [Rank 0] step:3161/10000 train_time:149867ms step_avg:47.41ms
+[2025-09-05 17:02:00] [Rank 0] step:3181/10000 train_time:150603ms step_avg:47.34ms
+[2025-09-05 17:02:00] [Rank 0] step:3201/10000 train_time:151339ms step_avg:47.28ms
+[2025-09-05 17:02:01] [Rank 0] step:3221/10000 train_time:152076ms step_avg:47.21ms
+[2025-09-05 17:02:02] [Rank 0] step:3241/10000 train_time:152812ms step_avg:47.15ms
+[2025-09-05 17:02:03] [Rank 0] step:3261/10000 train_time:153547ms step_avg:47.09ms
+[2025-09-05 17:02:03] [Rank 0] step:3281/10000 train_time:154284ms step_avg:47.02ms
+[2025-09-05 17:02:04] [Rank 0] step:3301/10000 train_time:155020ms step_avg:46.96ms
+[2025-09-05 17:02:05] [Rank 0] step:3321/10000 train_time:155756ms step_avg:46.90ms
+[2025-09-05 17:02:06] [Rank 0] step:3341/10000 train_time:156493ms step_avg:46.84ms
+[2025-09-05 17:02:06] [Rank 0] step:3361/10000 train_time:157228ms step_avg:46.78ms
+[2025-09-05 17:02:07] [Rank 0] step:3381/10000 train_time:157964ms step_avg:46.72ms
+[2025-09-05 17:02:08] [Rank 0] step:3401/10000 train_time:158700ms step_avg:46.66ms
+[2025-09-05 17:02:09] [Rank 0] step:3421/10000 train_time:159436ms step_avg:46.61ms
+[2025-09-05 17:02:09] [Rank 0] step:3441/10000 train_time:160172ms step_avg:46.55ms
+[2025-09-05 17:02:10] [Rank 0] step:3461/10000 train_time:160907ms step_avg:46.49ms
+[2025-09-05 17:02:11] [Rank 0] step:3481/10000 train_time:161643ms step_avg:46.44ms
+[2025-09-05 17:02:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:02:12] [Rank 0] PRINT: step:3500/10000 train_loss:1.4563 val_loss:1.4417 train_time:162460ms step_avg:46.42ms
+[2025-09-05 17:02:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:02:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:03:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:03:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:03:33] [Rank 0] Total Loss: 4.2040
+[2025-09-05 17:03:33] [Rank 0] Total FTA (Unweighted): 0.4944
+[2025-09-05 17:03:33] [Rank 0] Total FTA (Weighted): 0.4944
+[2025-09-05 17:03:33] [Rank 0] Group 0 Loss: 3.5776
+[2025-09-05 17:03:33] [Rank 0] Group 1 Loss: 3.2784
+[2025-09-05 17:03:33] [Rank 0] Group 2 Loss: 3.1377
+[2025-09-05 17:03:33] [Rank 0] Group 3 Loss: 3.5599
+[2025-09-05 17:03:33] [Rank 0] Group 4 Loss: 3.6955
+[2025-09-05 17:03:33] [Rank 0] Group 5 Loss: 3.8680
+[2025-09-05 17:03:33] [Rank 0] Group 6 Loss: 3.9929
+[2025-09-05 17:03:33] [Rank 0] Group 7 Loss: 4.1977
+[2025-09-05 17:03:33] [Rank 0] Group 8 Loss: 4.4559
+[2025-09-05 17:03:33] [Rank 0] Group 9 Loss: 4.6118
+[2025-09-05 17:03:33] [Rank 0] Group 10 Loss: 4.7689
+[2025-09-05 17:03:33] [Rank 0] Group 11 Loss: 4.7709
+[2025-09-05 17:03:33] [Rank 0] Group 12 Loss: 4.7707
+[2025-09-05 17:03:33] [Rank 0] Group 13 Loss: 4.8162
+[2025-09-05 17:03:33] [Rank 0] Group 14 Loss: 4.8840
+[2025-09-05 17:03:33] [Rank 0] Group 15 Loss: 4.8771
+[2025-09-05 17:03:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:03:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:03:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:03:33] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:03:33] [Rank 0] Group 4 FTA: 0.6600
+[2025-09-05 17:03:33] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:03:33] [Rank 0] Group 6 FTA: 0.4500
+[2025-09-05 17:03:33] [Rank 0] Group 7 FTA: 0.4100
+[2025-09-05 17:03:33] [Rank 0] Group 8 FTA: 0.4000
+[2025-09-05 17:03:33] [Rank 0] Group 9 FTA: 0.3100
+[2025-09-05 17:03:33] [Rank 0] Group 10 FTA: 0.3300
+[2025-09-05 17:03:33] [Rank 0] Group 11 FTA: 0.2000
+[2025-09-05 17:03:33] [Rank 0] Group 12 FTA: 0.1600
+[2025-09-05 17:03:33] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 17:03:33] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 17:03:33] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:03:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:03:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:03:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:03:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:03:34] [Rank 0] step:3501/10000 train_time:162470ms step_avg:46.41ms
+[2025-09-05 17:03:35] [Rank 0] step:3521/10000 train_time:163139ms step_avg:46.33ms
+[2025-09-05 17:03:36] [Rank 0] step:3541/10000 train_time:163876ms step_avg:46.28ms
+[2025-09-05 17:03:37] [Rank 0] step:3561/10000 train_time:164613ms step_avg:46.23ms
+[2025-09-05 17:03:38] [Rank 0] step:3581/10000 train_time:165494ms step_avg:46.21ms
+[2025-09-05 17:03:38] [Rank 0] step:3601/10000 train_time:166231ms step_avg:46.16ms
+[2025-09-05 17:03:39] [Rank 0] step:3621/10000 train_time:166967ms step_avg:46.11ms
+[2025-09-05 17:03:40] [Rank 0] step:3641/10000 train_time:168322ms step_avg:46.23ms
+[2025-09-05 17:03:41] [Rank 0] step:3661/10000 train_time:169057ms step_avg:46.18ms
+[2025-09-05 17:03:42] [Rank 0] step:3681/10000 train_time:169793ms step_avg:46.13ms
+[2025-09-05 17:03:43] [Rank 0] step:3701/10000 train_time:170530ms step_avg:46.08ms
+[2025-09-05 17:03:43] [Rank 0] step:3721/10000 train_time:171266ms step_avg:46.03ms
+[2025-09-05 17:03:44] [Rank 0] step:3741/10000 train_time:172002ms step_avg:45.98ms
+[2025-09-05 17:03:45] [Rank 0] step:3761/10000 train_time:172738ms step_avg:45.93ms
+[2025-09-05 17:03:46] [Rank 0] step:3781/10000 train_time:173474ms step_avg:45.88ms
+[2025-09-05 17:03:46] [Rank 0] step:3801/10000 train_time:174210ms step_avg:45.83ms
+[2025-09-05 17:03:47] [Rank 0] step:3821/10000 train_time:174945ms step_avg:45.79ms
+[2025-09-05 17:03:48] [Rank 0] step:3841/10000 train_time:175681ms step_avg:45.74ms
+[2025-09-05 17:03:48] [Rank 0] step:3861/10000 train_time:176417ms step_avg:45.69ms
+[2025-09-05 17:03:49] [Rank 0] step:3881/10000 train_time:177154ms step_avg:45.65ms
+[2025-09-05 17:03:50] [Rank 0] step:3901/10000 train_time:177890ms step_avg:45.60ms
+[2025-09-05 17:03:51] [Rank 0] step:3921/10000 train_time:178626ms step_avg:45.56ms
+[2025-09-05 17:03:51] [Rank 0] step:3941/10000 train_time:179361ms step_avg:45.51ms
+[2025-09-05 17:03:52] [Rank 0] step:3961/10000 train_time:180097ms step_avg:45.47ms
+[2025-09-05 17:03:53] [Rank 0] step:3981/10000 train_time:180833ms step_avg:45.42ms
+[2025-09-05 17:03:54] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
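The PRINT lines at each 500-step boundary carry the train/val loss curve for this run. A small self-contained parser for pulling those triples out of a log in this format (regex written against the lines above; not part of the original tooling):

import re

# Matches e.g. "PRINT: step:4000/10000 train_loss:1.4455 val_loss:1.4325"
VAL_RE = re.compile(r"PRINT: step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_val_curve(log_text: str):
    """Return [(step, train_loss, val_loss), ...] in file order."""
    return [(int(s), float(t), float(v)) for s, t, v in VAL_RE.findall(log_text)]

# On this file the first entries are (3000, 1.48, 1.4549) and (3500, 1.4563, 1.4417).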
+[2025-09-05 17:03:54] [Rank 0] PRINT: step:4000/10000 train_loss:1.4455 val_loss:1.4325 train_time:181650ms step_avg:45.41ms
+[2025-09-05 17:03:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:03:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:05:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:05:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:05:15] [Rank 0] Total Loss: 4.1429
+[2025-09-05 17:05:15] [Rank 0] Total FTA (Unweighted): 0.5125
+[2025-09-05 17:05:15] [Rank 0] Total FTA (Weighted): 0.5125
+[2025-09-05 17:05:15] [Rank 0] Group 0 Loss: 3.4556
+[2025-09-05 17:05:15] [Rank 0] Group 1 Loss: 3.2649
+[2025-09-05 17:05:15] [Rank 0] Group 2 Loss: 3.1196
+[2025-09-05 17:05:15] [Rank 0] Group 3 Loss: 3.5323
+[2025-09-05 17:05:15] [Rank 0] Group 4 Loss: 3.6141
+[2025-09-05 17:05:15] [Rank 0] Group 5 Loss: 3.8016
+[2025-09-05 17:05:15] [Rank 0] Group 6 Loss: 3.8923
+[2025-09-05 17:05:15] [Rank 0] Group 7 Loss: 4.1280
+[2025-09-05 17:05:15] [Rank 0] Group 8 Loss: 4.4213
+[2025-09-05 17:05:15] [Rank 0] Group 9 Loss: 4.5476
+[2025-09-05 17:05:15] [Rank 0] Group 10 Loss: 4.7081
+[2025-09-05 17:05:15] [Rank 0] Group 11 Loss: 4.7156
+[2025-09-05 17:05:15] [Rank 0] Group 12 Loss: 4.6937
+[2025-09-05 17:05:15] [Rank 0] Group 13 Loss: 4.7555
+[2025-09-05 17:05:15] [Rank 0] Group 14 Loss: 4.8214
+[2025-09-05 17:05:15] [Rank 0] Group 15 Loss: 4.8144
+[2025-09-05 17:05:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:05:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:05:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:05:15] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:05:15] [Rank 0] Group 4 FTA: 0.8000
+[2025-09-05 17:05:15] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:05:15] [Rank 0] Group 6 FTA: 0.4500
+[2025-09-05 17:05:15] [Rank 0] Group 7 FTA: 0.4100
+[2025-09-05 17:05:15] [Rank 0] Group 8 FTA: 0.4300
+[2025-09-05 17:05:15] [Rank 0] Group 9 FTA: 0.3400
+[2025-09-05 17:05:15] [Rank 0] Group 10 FTA: 0.4000
+[2025-09-05 17:05:15] [Rank 0] Group 11 FTA: 0.2600
+[2025-09-05 17:05:15] [Rank 0] Group 12 FTA: 0.1300
+[2025-09-05 17:05:15] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 17:05:15] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:05:15] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:05:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:05:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:05:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:05:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:05:17] [Rank 0] step:4001/10000 train_time:181659ms step_avg:45.40ms
+[2025-09-05 17:05:18] [Rank 0] step:4021/10000 train_time:182940ms step_avg:45.50ms
+[2025-09-05 17:05:19] [Rank 0] step:4041/10000 train_time:183677ms step_avg:45.45ms
+[2025-09-05 17:05:19] [Rank 0] step:4061/10000 train_time:184413ms step_avg:45.41ms
+[2025-09-05 17:05:20] [Rank 0] step:4081/10000 train_time:185149ms step_avg:45.37ms
+[2025-09-05 17:05:21] [Rank 0] step:4101/10000 train_time:185885ms step_avg:45.33ms
+[2025-09-05 17:05:22] [Rank 0] step:4121/10000 train_time:186621ms step_avg:45.29ms
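After each detailed evaluation the run overwrites the same four PNGs in its log directory (per-class loss/FTA curves and total loss/FTA curves). The plotting code itself is not in this log; a minimal matplotlib sketch of that update-and-save pattern, with illustrative names:

import matplotlib
matplotlib.use("Agg")  # headless backend, as on a training node
import matplotlib.pyplot as plt

def save_per_class_curves(history, out_path):
    """history: {group_id: [(step, loss), ...]} accumulated over evals."""
    fig, ax = plt.subplots()
    for gid, points in sorted(history.items()):
        steps, losses = zip(*points)
        ax.plot(steps, losses, label=f"Group {gid}")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval loss")
    ax.legend(fontsize=6, ncol=2)
    fig.savefig(out_path, dpi=150)  # overwrite in place each eval
    plt.close(fig)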
+[2025-09-05 17:05:22] [Rank 0] step:4141/10000 train_time:187358ms step_avg:45.24ms
+[2025-09-05 17:05:23] [Rank 0] step:4161/10000 train_time:188094ms step_avg:45.20ms
+[2025-09-05 17:05:24] [Rank 0] step:4181/10000 train_time:188830ms step_avg:45.16ms
+[2025-09-05 17:05:25] [Rank 0] step:4201/10000 train_time:189567ms step_avg:45.12ms
+[2025-09-05 17:05:25] [Rank 0] step:4221/10000 train_time:190303ms step_avg:45.08ms
+[2025-09-05 17:05:26] [Rank 0] step:4241/10000 train_time:191039ms step_avg:45.05ms
+[2025-09-05 17:05:27] [Rank 0] step:4261/10000 train_time:191776ms step_avg:45.01ms
+[2025-09-05 17:05:28] [Rank 0] step:4281/10000 train_time:192512ms step_avg:44.97ms
+[2025-09-05 17:05:28] [Rank 0] step:4301/10000 train_time:193248ms step_avg:44.93ms
+[2025-09-05 17:05:29] [Rank 0] step:4321/10000 train_time:193985ms step_avg:44.89ms
+[2025-09-05 17:05:30] [Rank 0] step:4341/10000 train_time:194720ms step_avg:44.86ms
+[2025-09-05 17:05:31] [Rank 0] step:4361/10000 train_time:195457ms step_avg:44.82ms
+[2025-09-05 17:05:31] [Rank 0] step:4381/10000 train_time:196193ms step_avg:44.78ms
+[2025-09-05 17:05:32] [Rank 0] step:4401/10000 train_time:196929ms step_avg:44.75ms
+[2025-09-05 17:05:33] [Rank 0] step:4421/10000 train_time:197665ms step_avg:44.71ms
+[2025-09-05 17:05:33] [Rank 0] step:4441/10000 train_time:198400ms step_avg:44.67ms
+[2025-09-05 17:05:34] [Rank 0] step:4461/10000 train_time:199137ms step_avg:44.64ms
+[2025-09-05 17:05:35] [Rank 0] step:4481/10000 train_time:199873ms step_avg:44.60ms
+[2025-09-05 17:05:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:05:36] [Rank 0] PRINT: step:4500/10000 train_loss:1.4394 val_loss:1.4291 train_time:200690ms step_avg:44.60ms
+[2025-09-05 17:05:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:05:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:06:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:06:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:06:57] [Rank 0] Total Loss: 4.3318
+[2025-09-05 17:06:57] [Rank 0] Total FTA (Unweighted): 0.5125
+[2025-09-05 17:06:57] [Rank 0] Total FTA (Weighted): 0.5125
+[2025-09-05 17:06:57] [Rank 0] Group 0 Loss: 3.6416
+[2025-09-05 17:06:57] [Rank 0] Group 1 Loss: 3.4049
+[2025-09-05 17:06:57] [Rank 0] Group 2 Loss: 3.2263
+[2025-09-05 17:06:57] [Rank 0] Group 3 Loss: 3.7317
+[2025-09-05 17:06:57] [Rank 0] Group 4 Loss: 3.8414
+[2025-09-05 17:06:57] [Rank 0] Group 5 Loss: 4.0220
+[2025-09-05 17:06:57] [Rank 0] Group 6 Loss: 4.0728
+[2025-09-05 17:06:57] [Rank 0] Group 7 Loss: 4.2940
+[2025-09-05 17:06:57] [Rank 0] Group 8 Loss: 4.5931
+[2025-09-05 17:06:57] [Rank 0] Group 9 Loss: 4.7449
+[2025-09-05 17:06:57] [Rank 0] Group 10 Loss: 4.9186
+[2025-09-05 17:06:57] [Rank 0] Group 11 Loss: 4.9086
+[2025-09-05 17:06:57] [Rank 0] Group 12 Loss: 4.8887
+[2025-09-05 17:06:57] [Rank 0] Group 13 Loss: 4.9879
+[2025-09-05 17:06:57] [Rank 0] Group 14 Loss: 5.0319
+[2025-09-05 17:06:57] [Rank 0] Group 15 Loss: 5.0005
+[2025-09-05 17:06:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:06:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:06:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:06:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:06:57] [Rank 0] Group 4 FTA: 0.7800
+[2025-09-05 17:06:57] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:06:57] [Rank 0] Group 6 FTA: 0.4800
+[2025-09-05 17:06:57] [Rank 0] Group 7 FTA: 0.4500
+[2025-09-05 17:06:57] [Rank 0] Group 8 FTA: 0.4400
+[2025-09-05 17:06:57] [Rank 0] Group 9 FTA: 0.3300
+[2025-09-05 17:06:57] [Rank 0] Group 10 FTA: 0.3600
+[2025-09-05 17:06:57] [Rank 0] Group 11 FTA: 0.2700
+[2025-09-05 17:06:57] [Rank 0] Group 12 FTA: 0.1400
+[2025-09-05 17:06:57] [Rank 0] Group 13 FTA: 0.1700
+[2025-09-05 17:06:57] [Rank 0] Group 14 FTA: 0.1200
+[2025-09-05 17:06:57] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:06:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:06:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:06:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:06:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:06:58] [Rank 0] step:4501/10000 train_time:200699ms step_avg:44.59ms
+[2025-09-05 17:06:59] [Rank 0] step:4521/10000 train_time:201366ms step_avg:44.54ms
+[2025-09-05 17:07:00] [Rank 0] step:4541/10000 train_time:202102ms step_avg:44.51ms
+[2025-09-05 17:07:00] [Rank 0] step:4561/10000 train_time:202838ms step_avg:44.47ms
+[2025-09-05 17:07:01] [Rank 0] step:4581/10000 train_time:203574ms step_avg:44.44ms
+[2025-09-05 17:07:02] [Rank 0] step:4601/10000 train_time:204310ms step_avg:44.41ms
+[2025-09-05 17:07:02] [Rank 0] step:4621/10000 train_time:205046ms step_avg:44.37ms
+[2025-09-05 17:07:03] [Rank 0] step:4641/10000 train_time:205783ms step_avg:44.34ms
+[2025-09-05 17:07:04] [Rank 0] step:4661/10000 train_time:206519ms step_avg:44.31ms
+[2025-09-05 17:07:05] [Rank 0] step:4681/10000 train_time:207255ms step_avg:44.28ms
+[2025-09-05 17:07:05] [Rank 0] step:4701/10000 train_time:207991ms step_avg:44.24ms
+[2025-09-05 17:07:06] [Rank 0] step:4721/10000 train_time:208727ms step_avg:44.21ms
+[2025-09-05 17:07:07] [Rank 0] step:4741/10000 train_time:209463ms step_avg:44.18ms
+[2025-09-05 17:07:08] [Rank 0] step:4761/10000 train_time:210200ms step_avg:44.15ms
+[2025-09-05 17:07:08] [Rank 0] step:4781/10000 train_time:210935ms step_avg:44.12ms
+[2025-09-05 17:07:09] [Rank 0] step:4801/10000 train_time:211671ms step_avg:44.09ms
+[2025-09-05 17:07:10] [Rank 0] step:4821/10000 train_time:212406ms step_avg:44.06ms
+[2025-09-05 17:07:11] [Rank 0] step:4841/10000 train_time:213452ms step_avg:44.09ms
+[2025-09-05 17:07:12] [Rank 0] step:4861/10000 train_time:214188ms step_avg:44.06ms
+[2025-09-05 17:07:12] [Rank 0] step:4881/10000 train_time:214925ms step_avg:44.03ms
+[2025-09-05 17:07:13] [Rank 0] step:4901/10000 train_time:215660ms step_avg:44.00ms
+[2025-09-05 17:07:14] [Rank 0] step:4921/10000 train_time:216396ms step_avg:43.97ms
+[2025-09-05 17:07:15] [Rank 0] step:4941/10000 train_time:217132ms step_avg:43.94ms
+[2025-09-05 17:07:15] [Rank 0] step:4961/10000 train_time:217868ms step_avg:43.92ms
+[2025-09-05 17:07:16] [Rank 0] step:4981/10000 train_time:218604ms step_avg:43.89ms
+[2025-09-05 17:07:17] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:07:17] [Rank 0] PRINT: step:5000/10000 train_loss:1.4372 val_loss:1.4274 train_time:219421ms step_avg:43.88ms
+[2025-09-05 17:07:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:07:17] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:08:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:08:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:08:38] [Rank 0] Total Loss: 4.2102
+[2025-09-05 17:08:38] [Rank 0] Total FTA (Unweighted): 0.5400
+[2025-09-05 17:08:38] [Rank 0] Total FTA (Weighted): 0.5400
+[2025-09-05 17:08:38] [Rank 0] Group 0 Loss: 3.4385
+[2025-09-05 17:08:38] [Rank 0] Group 1 Loss: 3.2872
+[2025-09-05 17:08:38] [Rank 0] Group 2 Loss: 3.1977
+[2025-09-05 17:08:38] [Rank 0] Group 3 Loss: 3.5862
+[2025-09-05 17:08:38] [Rank 0] Group 4 Loss: 3.6779
+[2025-09-05 17:08:38] [Rank 0] Group 5 Loss: 3.9125
+[2025-09-05 17:08:38] [Rank 0] Group 6 Loss: 4.0210
+[2025-09-05 17:08:38] [Rank 0] Group 7 Loss: 4.1964
+[2025-09-05 17:08:38] [Rank 0] Group 8 Loss: 4.4896
+[2025-09-05 17:08:38] [Rank 0] Group 9 Loss: 4.6102
+[2025-09-05 17:08:38] [Rank 0] Group 10 Loss: 4.7819
+[2025-09-05 17:08:38] [Rank 0] Group 11 Loss: 4.8201
+[2025-09-05 17:08:38] [Rank 0] Group 12 Loss: 4.7700
+[2025-09-05 17:08:38] [Rank 0] Group 13 Loss: 4.8379
+[2025-09-05 17:08:38] [Rank 0] Group 14 Loss: 4.8900
+[2025-09-05 17:08:38] [Rank 0] Group 15 Loss: 4.8467
+[2025-09-05 17:08:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:08:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:08:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:08:38] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:08:38] [Rank 0] Group 4 FTA: 0.9400
+[2025-09-05 17:08:38] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 17:08:38] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 17:08:38] [Rank 0] Group 7 FTA: 0.4700
+[2025-09-05 17:08:38] [Rank 0] Group 8 FTA: 0.4800
+[2025-09-05 17:08:38] [Rank 0] Group 9 FTA: 0.3900
+[2025-09-05 17:08:38] [Rank 0] Group 10 FTA: 0.4200
+[2025-09-05 17:08:38] [Rank 0] Group 11 FTA: 0.2700
+[2025-09-05 17:08:38] [Rank 0] Group 12 FTA: 0.2000
+[2025-09-05 17:08:38] [Rank 0] Group 13 FTA: 0.1400
+[2025-09-05 17:08:38] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:08:38] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 17:08:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:08:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:08:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:08:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:08:40] [Rank 0] step:5001/10000 train_time:219431ms step_avg:43.88ms
+[2025-09-05 17:08:41] [Rank 0] step:5021/10000 train_time:220093ms step_avg:43.83ms
+[2025-09-05 17:08:41] [Rank 0] step:5041/10000 train_time:220829ms step_avg:43.81ms
+[2025-09-05 17:08:42] [Rank 0] step:5061/10000 train_time:221564ms step_avg:43.78ms
+[2025-09-05 17:08:43] [Rank 0] step:5081/10000 train_time:222300ms step_avg:43.75ms
+[2025-09-05 17:08:43] [Rank 0] step:5101/10000 train_time:223036ms step_avg:43.72ms
+[2025-09-05 17:08:44] [Rank 0] step:5121/10000 train_time:223773ms step_avg:43.70ms
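The step_avg column reads as cumulative train_time divided by the current step rather than a windowed average, which explains both its slow downward drift and the small upticks after a slow step; this is an inference from the numbers in this log, not a quote of the script. Checking it against two prints above:

# step_avg appears to be total train_time / step (inference from this log).
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

assert round(step_avg_ms(219421, 5000), 2) == 43.88  # step 5000 print above
assert round(step_avg_ms(223773, 5121), 2) == 43.70  # step 5121 print above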
+[2025-09-05 17:08:45] [Rank 0] step:5141/10000 train_time:224510ms step_avg:43.67ms
+[2025-09-05 17:08:46] [Rank 0] step:5161/10000 train_time:225247ms step_avg:43.64ms
+[2025-09-05 17:08:46] [Rank 0] step:5181/10000 train_time:225984ms step_avg:43.62ms
+[2025-09-05 17:08:47] [Rank 0] step:5201/10000 train_time:226720ms step_avg:43.59ms
+[2025-09-05 17:08:48] [Rank 0] step:5221/10000 train_time:227457ms step_avg:43.57ms
+[2025-09-05 17:08:49] [Rank 0] step:5241/10000 train_time:228194ms step_avg:43.54ms
+[2025-09-05 17:08:49] [Rank 0] step:5261/10000 train_time:228930ms step_avg:43.51ms
+[2025-09-05 17:08:50] [Rank 0] step:5281/10000 train_time:229666ms step_avg:43.49ms
+[2025-09-05 17:08:51] [Rank 0] step:5301/10000 train_time:230518ms step_avg:43.49ms
+[2025-09-05 17:08:52] [Rank 0] step:5321/10000 train_time:231254ms step_avg:43.46ms
+[2025-09-05 17:08:52] [Rank 0] step:5341/10000 train_time:231991ms step_avg:43.44ms
+[2025-09-05 17:08:53] [Rank 0] step:5361/10000 train_time:232866ms step_avg:43.44ms
+[2025-09-05 17:08:54] [Rank 0] step:5381/10000 train_time:233602ms step_avg:43.41ms
+[2025-09-05 17:08:55] [Rank 0] step:5401/10000 train_time:234339ms step_avg:43.39ms
+[2025-09-05 17:08:56] [Rank 0] step:5421/10000 train_time:235075ms step_avg:43.36ms
+[2025-09-05 17:08:56] [Rank 0] step:5441/10000 train_time:235812ms step_avg:43.34ms
+[2025-09-05 17:08:57] [Rank 0] step:5461/10000 train_time:236548ms step_avg:43.32ms
+[2025-09-05 17:08:58] [Rank 0] step:5481/10000 train_time:237285ms step_avg:43.29ms
+[2025-09-05 17:08:58] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:08:59] [Rank 0] PRINT: step:5500/10000 train_loss:1.4367 val_loss:1.4275 train_time:238102ms step_avg:43.29ms
+[2025-09-05 17:08:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:08:59] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:10:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:10:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:10:20] [Rank 0] Total Loss: 4.1710
+[2025-09-05 17:10:20] [Rank 0] Total FTA (Unweighted): 0.5300
+[2025-09-05 17:10:20] [Rank 0] Total FTA (Weighted): 0.5300
+[2025-09-05 17:10:20] [Rank 0] Group 0 Loss: 3.3945
+[2025-09-05 17:10:20] [Rank 0] Group 1 Loss: 3.3379
+[2025-09-05 17:10:20] [Rank 0] Group 2 Loss: 3.2124
+[2025-09-05 17:10:20] [Rank 0] Group 3 Loss: 3.5721
+[2025-09-05 17:10:20] [Rank 0] Group 4 Loss: 3.6403
+[2025-09-05 17:10:20] [Rank 0] Group 5 Loss: 3.8789
+[2025-09-05 17:10:20] [Rank 0] Group 6 Loss: 3.9878
+[2025-09-05 17:10:20] [Rank 0] Group 7 Loss: 4.1414
+[2025-09-05 17:10:20] [Rank 0] Group 8 Loss: 4.4526
+[2025-09-05 17:10:20] [Rank 0] Group 9 Loss: 4.5742
+[2025-09-05 17:10:20] [Rank 0] Group 10 Loss: 4.7327
+[2025-09-05 17:10:20] [Rank 0] Group 11 Loss: 4.7379
+[2025-09-05 17:10:20] [Rank 0] Group 12 Loss: 4.6805
+[2025-09-05 17:10:20] [Rank 0] Group 13 Loss: 4.7713
+[2025-09-05 17:10:20] [Rank 0] Group 14 Loss: 4.8249
+[2025-09-05 17:10:20] [Rank 0] Group 15 Loss: 4.7970
+[2025-09-05 17:10:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:10:20] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:10:20] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:10:20] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:10:20] [Rank 0] Group 4 FTA: 0.8600
+[2025-09-05 17:10:20] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:10:20] [Rank 0] Group 6 FTA: 0.4900
+[2025-09-05 17:10:20] [Rank 0] Group 7 FTA: 0.4600
+[2025-09-05 17:10:20] [Rank 0] Group 8 FTA: 0.4600
+[2025-09-05 17:10:20] [Rank 0] Group 9 FTA: 0.3600
+[2025-09-05 17:10:20] [Rank 0] Group 10 FTA: 0.4300
+[2025-09-05 17:10:20] [Rank 0] Group 11 FTA: 0.3000
+[2025-09-05 17:10:20] [Rank 0] Group 12 FTA: 0.1700
+[2025-09-05 17:10:20] [Rank 0] Group 13 FTA: 0.1500
+[2025-09-05 17:10:20] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:10:20] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:10:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:10:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:10:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:10:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:10:22] [Rank 0] step:5501/10000 train_time:238111ms step_avg:43.29ms
+[2025-09-05 17:10:22] [Rank 0] step:5521/10000 train_time:238778ms step_avg:43.25ms
+[2025-09-05 17:10:23] [Rank 0] step:5541/10000 train_time:239515ms step_avg:43.23ms
+[2025-09-05 17:10:24] [Rank 0] step:5561/10000 train_time:240252ms step_avg:43.20ms
+[2025-09-05 17:10:24] [Rank 0] step:5581/10000 train_time:240989ms step_avg:43.18ms
+[2025-09-05 17:10:25] [Rank 0] step:5601/10000 train_time:241725ms step_avg:43.16ms
+[2025-09-05 17:10:26] [Rank 0] step:5621/10000 train_time:242461ms step_avg:43.13ms
+[2025-09-05 17:10:27] [Rank 0] step:5641/10000 train_time:243809ms step_avg:43.22ms
+[2025-09-05 17:10:28] [Rank 0] step:5661/10000 train_time:244545ms step_avg:43.20ms
+[2025-09-05 17:10:29] [Rank 0] step:5681/10000 train_time:245282ms step_avg:43.18ms
+[2025-09-05 17:10:30] [Rank 0] step:5701/10000 train_time:246019ms step_avg:43.15ms
+[2025-09-05 17:10:30] [Rank 0] step:5721/10000 train_time:246755ms step_avg:43.13ms
+[2025-09-05 17:10:31] [Rank 0] step:5741/10000 train_time:247492ms step_avg:43.11ms
+[2025-09-05 17:10:32] [Rank 0] step:5761/10000 train_time:248228ms step_avg:43.09ms
+[2025-09-05 17:10:32] [Rank 0] step:5781/10000 train_time:248964ms step_avg:43.07ms
+[2025-09-05 17:10:33] [Rank 0] step:5801/10000 train_time:249700ms step_avg:43.04ms
+[2025-09-05 17:10:34] [Rank 0] step:5821/10000 train_time:250436ms step_avg:43.02ms
+[2025-09-05 17:10:35] [Rank 0] step:5841/10000 train_time:251172ms step_avg:43.00ms
+[2025-09-05 17:10:35] [Rank 0] step:5861/10000 train_time:251908ms step_avg:42.98ms
+[2025-09-05 17:10:36] [Rank 0] step:5881/10000 train_time:252645ms step_avg:42.96ms
+[2025-09-05 17:10:37] [Rank 0] step:5901/10000 train_time:253380ms step_avg:42.94ms
+[2025-09-05 17:10:38] [Rank 0] step:5921/10000 train_time:254117ms step_avg:42.92ms
+[2025-09-05 17:10:38] [Rank 0] step:5941/10000 train_time:254855ms step_avg:42.90ms
+[2025-09-05 17:10:39] [Rank 0] step:5961/10000 train_time:255592ms step_avg:42.88ms
+[2025-09-05 17:10:40] [Rank 0] step:5981/10000 train_time:256328ms step_avg:42.86ms
+[2025-09-05 17:10:41] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:10:41] [Rank 0] PRINT: step:6000/10000 train_loss:1.4383 val_loss:1.4306 train_time:257144ms step_avg:42.86ms
+[2025-09-05 17:10:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:10:41] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:12:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:12:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:12:02] [Rank 0] Total Loss: 4.2013
+[2025-09-05 17:12:02] [Rank 0] Total FTA (Unweighted): 0.5425
+[2025-09-05 17:12:02] [Rank 0] Total FTA (Weighted): 0.5425
+[2025-09-05 17:12:02] [Rank 0] Group 0 Loss: 3.4165
+[2025-09-05 17:12:02] [Rank 0] Group 1 Loss: 3.2799
+[2025-09-05 17:12:02] [Rank 0] Group 2 Loss: 3.2802
+[2025-09-05 17:12:02] [Rank 0] Group 3 Loss: 3.6003
+[2025-09-05 17:12:02] [Rank 0] Group 4 Loss: 3.6555
+[2025-09-05 17:12:02] [Rank 0] Group 5 Loss: 3.9128
+[2025-09-05 17:12:02] [Rank 0] Group 6 Loss: 4.0043
+[2025-09-05 17:12:02] [Rank 0] Group 7 Loss: 4.1900
+[2025-09-05 17:12:02] [Rank 0] Group 8 Loss: 4.4870
+[2025-09-05 17:12:02] [Rank 0] Group 9 Loss: 4.5833
+[2025-09-05 17:12:02] [Rank 0] Group 10 Loss: 4.8014
+[2025-09-05 17:12:02] [Rank 0] Group 11 Loss: 4.7843
+[2025-09-05 17:12:02] [Rank 0] Group 12 Loss: 4.7327
+[2025-09-05 17:12:02] [Rank 0] Group 13 Loss: 4.7605
+[2025-09-05 17:12:02] [Rank 0] Group 14 Loss: 4.8664
+[2025-09-05 17:12:02] [Rank 0] Group 15 Loss: 4.8663
+[2025-09-05 17:12:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:12:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:12:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:12:02] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:12:02] [Rank 0] Group 4 FTA: 0.9100
+[2025-09-05 17:12:02] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:12:02] [Rank 0] Group 6 FTA: 0.5000
+[2025-09-05 17:12:02] [Rank 0] Group 7 FTA: 0.4700
+[2025-09-05 17:12:02] [Rank 0] Group 8 FTA: 0.4700
+[2025-09-05 17:12:02] [Rank 0] Group 9 FTA: 0.3900
+[2025-09-05 17:12:02] [Rank 0] Group 10 FTA: 0.4400
+[2025-09-05 17:12:02] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 17:12:02] [Rank 0] Group 12 FTA: 0.1900
+[2025-09-05 17:12:02] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 17:12:02] [Rank 0] Group 14 FTA: 0.1700
+[2025-09-05 17:12:02] [Rank 0] Group 15 FTA: 0.0900
+[2025-09-05 17:12:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:12:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:12:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:12:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:12:04] [Rank 0] step:6001/10000 train_time:257154ms step_avg:42.85ms
+[2025-09-05 17:12:05] [Rank 0] step:6021/10000 train_time:258424ms step_avg:42.92ms
+[2025-09-05 17:12:06] [Rank 0] step:6041/10000 train_time:259161ms step_avg:42.90ms
+[2025-09-05 17:12:06] [Rank 0] step:6061/10000 train_time:259897ms step_avg:42.88ms
+[2025-09-05 17:12:07] [Rank 0] step:6081/10000 train_time:260633ms step_avg:42.86ms
+[2025-09-05 17:12:08] [Rank 0] step:6101/10000 train_time:261369ms step_avg:42.84ms
+[2025-09-05 17:12:09] [Rank 0] step:6121/10000 train_time:262105ms step_avg:42.82ms
+[2025-09-05 17:12:09] [Rank 0] step:6141/10000 train_time:262842ms step_avg:42.80ms
+[2025-09-05 17:12:10] [Rank 0] step:6161/10000 train_time:263577ms step_avg:42.78ms
+[2025-09-05 17:12:11] [Rank 0] step:6181/10000 train_time:264313ms step_avg:42.76ms
+[2025-09-05 17:12:12] [Rank 0] step:6201/10000 train_time:265049ms step_avg:42.74ms
+[2025-09-05 17:12:12] [Rank 0] step:6221/10000 train_time:265786ms step_avg:42.72ms
+[2025-09-05 17:12:13] [Rank 0] step:6241/10000 train_time:266521ms step_avg:42.70ms
+[2025-09-05 17:12:14] [Rank 0] step:6261/10000 train_time:267257ms step_avg:42.69ms
+[2025-09-05 17:12:15] [Rank 0] step:6281/10000 train_time:267994ms step_avg:42.67ms
+[2025-09-05 17:12:15] [Rank 0] step:6301/10000 train_time:268729ms step_avg:42.65ms
+[2025-09-05 17:12:16] [Rank 0] step:6321/10000 train_time:269465ms step_avg:42.63ms
+[2025-09-05 17:12:17] [Rank 0] step:6341/10000 train_time:270202ms step_avg:42.61ms
+[2025-09-05 17:12:17] [Rank 0] step:6361/10000 train_time:270938ms step_avg:42.59ms
+[2025-09-05 17:12:18] [Rank 0] step:6381/10000 train_time:271674ms step_avg:42.58ms
+[2025-09-05 17:12:19] [Rank 0] step:6401/10000 train_time:272411ms step_avg:42.56ms
+[2025-09-05 17:12:20] [Rank 0] step:6421/10000 train_time:273146ms step_avg:42.54ms
+[2025-09-05 17:12:20] [Rank 0] step:6441/10000 train_time:273882ms step_avg:42.52ms
+[2025-09-05 17:12:21] [Rank 0] step:6461/10000 train_time:274619ms step_avg:42.50ms
+[2025-09-05 17:12:22] [Rank 0] step:6481/10000 train_time:275355ms step_avg:42.49ms
+[2025-09-05 17:12:23] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
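
In the step-6000 results above, "Total FTA (Unweighted)" is exactly the arithmetic mean of the 16 per-group FTA values, and the weighted total coincides with it, which is consistent with the 1600 fixed-eval samples being split evenly across the 16 groups (100 each; the counts below are that assumption, not values read from the script):

    group_fta = [1.0000, 1.0000, 1.0000, 1.0000, 0.9100, 0.5700, 0.5000, 0.4700,
                 0.4700, 0.3900, 0.4400, 0.3200, 0.1900, 0.1600, 0.1700, 0.0900]
    counts = [100] * 16  # assumed even split of the 1600 fixed-eval samples

    unweighted = sum(group_fta) / len(group_fta)
    weighted = sum(f * c for f, c in zip(group_fta, counts)) / sum(counts)
    print(f"{unweighted:.4f} {weighted:.4f}")  # 0.5425 0.5425, matching the log
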
+[2025-09-05 17:12:23] [Rank 0] PRINT: step:6500/10000 train_loss:1.4396 val_loss:1.4306 train_time:276172ms step_avg:42.49ms
+[2025-09-05 17:12:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:12:23] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:13:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:13:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:13:45] [Rank 0] Total Loss: 4.3224
+[2025-09-05 17:13:45] [Rank 0] Total FTA (Unweighted): 0.5513
+[2025-09-05 17:13:45] [Rank 0] Total FTA (Weighted): 0.5513
+[2025-09-05 17:13:45] [Rank 0] Group 0 Loss: 3.6844
+[2025-09-05 17:13:45] [Rank 0] Group 1 Loss: 3.4315
+[2025-09-05 17:13:45] [Rank 0] Group 2 Loss: 3.3203
+[2025-09-05 17:13:45] [Rank 0] Group 3 Loss: 3.6757
+[2025-09-05 17:13:45] [Rank 0] Group 4 Loss: 3.8267
+[2025-09-05 17:13:45] [Rank 0] Group 5 Loss: 4.0221
+[2025-09-05 17:13:45] [Rank 0] Group 6 Loss: 4.1234
+[2025-09-05 17:13:45] [Rank 0] Group 7 Loss: 4.2869
+[2025-09-05 17:13:45] [Rank 0] Group 8 Loss: 4.5879
+[2025-09-05 17:13:45] [Rank 0] Group 9 Loss: 4.6975
+[2025-09-05 17:13:45] [Rank 0] Group 10 Loss: 4.8900
+[2025-09-05 17:13:45] [Rank 0] Group 11 Loss: 4.9248
+[2025-09-05 17:13:45] [Rank 0] Group 12 Loss: 4.8440
+[2025-09-05 17:13:45] [Rank 0] Group 13 Loss: 4.9093
+[2025-09-05 17:13:45] [Rank 0] Group 14 Loss: 4.9641
+[2025-09-05 17:13:45] [Rank 0] Group 15 Loss: 4.9700
+[2025-09-05 17:13:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:13:45] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:13:45] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:13:45] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:13:45] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 17:13:45] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:13:45] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 17:13:45] [Rank 0] Group 7 FTA: 0.4800
+[2025-09-05 17:13:45] [Rank 0] Group 8 FTA: 0.4800
+[2025-09-05 17:13:45] [Rank 0] Group 9 FTA: 0.4000
+[2025-09-05 17:13:45] [Rank 0] Group 10 FTA: 0.4800
+[2025-09-05 17:13:45] [Rank 0] Group 11 FTA: 0.3200
+[2025-09-05 17:13:45] [Rank 0] Group 12 FTA: 0.2100
+[2025-09-05 17:13:45] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 17:13:45] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 17:13:45] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:13:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:13:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:13:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:13:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:13:47] [Rank 0] step:6501/10000 train_time:276181ms step_avg:42.48ms
+[2025-09-05 17:13:47] [Rank 0] step:6521/10000 train_time:276859ms step_avg:42.46ms
+[2025-09-05 17:13:48] [Rank 0] step:6541/10000 train_time:277595ms step_avg:42.44ms
+[2025-09-05 17:13:49] [Rank 0] step:6561/10000 train_time:278331ms step_avg:42.42ms
+[2025-09-05 17:13:50] [Rank 0] step:6581/10000 train_time:279067ms step_avg:42.41ms
+[2025-09-05 17:13:50] [Rank 0] step:6601/10000 train_time:279803ms step_avg:42.39ms
+[2025-09-05 17:13:51] [Rank 0] step:6621/10000 train_time:280539ms step_avg:42.37ms
+[2025-09-05 17:13:52] [Rank 0] step:6641/10000 train_time:281275ms step_avg:42.35ms
+[2025-09-05 17:13:53] [Rank 0] step:6661/10000 train_time:282011ms step_avg:42.34ms
+[2025-09-05 17:13:53] [Rank 0] step:6681/10000 train_time:282747ms step_avg:42.32ms
+[2025-09-05 17:13:54] [Rank 0] step:6701/10000 train_time:283483ms step_avg:42.30ms
+[2025-09-05 17:13:55] [Rank 0] step:6721/10000 train_time:284221ms step_avg:42.29ms
+[2025-09-05 17:13:55] [Rank 0] step:6741/10000 train_time:284956ms step_avg:42.27ms
+[2025-09-05 17:13:56] [Rank 0] step:6761/10000 train_time:285692ms step_avg:42.26ms
+[2025-09-05 17:13:57] [Rank 0] step:6781/10000 train_time:286428ms step_avg:42.24ms
+[2025-09-05 17:13:58] [Rank 0] step:6801/10000 train_time:287164ms step_avg:42.22ms
+[2025-09-05 17:13:58] [Rank 0] step:6821/10000 train_time:287900ms step_avg:42.21ms
+[2025-09-05 17:14:00] [Rank 0] step:6841/10000 train_time:289252ms step_avg:42.28ms
+[2025-09-05 17:14:01] [Rank 0] step:6861/10000 train_time:289988ms step_avg:42.27ms
+[2025-09-05 17:14:01] [Rank 0] step:6881/10000 train_time:290724ms step_avg:42.25ms
+[2025-09-05 17:14:02] [Rank 0] step:6901/10000 train_time:291460ms step_avg:42.23ms
+[2025-09-05 17:14:03] [Rank 0] step:6921/10000 train_time:292196ms step_avg:42.22ms
+[2025-09-05 17:14:03] [Rank 0] step:6941/10000 train_time:292932ms step_avg:42.20ms
+[2025-09-05 17:14:04] [Rank 0] step:6961/10000 train_time:293669ms step_avg:42.19ms
+[2025-09-05 17:14:05] [Rank 0] step:6981/10000 train_time:294405ms step_avg:42.17ms
+[2025-09-05 17:14:06] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
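
The step_avg column is consistent with cumulative train_time divided by the current step index, e.g. 257144 ms / 6000 ≈ 42.86 ms at step 6000 above and 295222 ms / 7000 ≈ 42.17 ms at step 7000 below. A one-line sketch of that relationship (the helper name is illustrative, not from the script):

    def step_avg_ms(train_time_ms: int, step: int) -> float:
        return train_time_ms / step

    print(f"{step_avg_ms(257144, 6000):.2f}ms")  # 42.86ms
    print(f"{step_avg_ms(295222, 7000):.2f}ms")  # 42.17ms
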
+[2025-09-05 17:14:06] [Rank 0] PRINT: step:7000/10000 train_loss:1.4393 val_loss:1.4306 train_time:295222ms step_avg:42.17ms
+[2025-09-05 17:14:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:14:06] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:15:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:15:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:15:27] [Rank 0] Total Loss: 4.1799
+[2025-09-05 17:15:27] [Rank 0] Total FTA (Unweighted): 0.5544
+[2025-09-05 17:15:27] [Rank 0] Total FTA (Weighted): 0.5544
+[2025-09-05 17:15:27] [Rank 0] Group 0 Loss: 3.5224
+[2025-09-05 17:15:27] [Rank 0] Group 1 Loss: 3.2380
+[2025-09-05 17:15:27] [Rank 0] Group 2 Loss: 3.1961
+[2025-09-05 17:15:27] [Rank 0] Group 3 Loss: 3.5291
+[2025-09-05 17:15:27] [Rank 0] Group 4 Loss: 3.6542
+[2025-09-05 17:15:27] [Rank 0] Group 5 Loss: 3.8996
+[2025-09-05 17:15:27] [Rank 0] Group 6 Loss: 3.9985
+[2025-09-05 17:15:27] [Rank 0] Group 7 Loss: 4.1600
+[2025-09-05 17:15:27] [Rank 0] Group 8 Loss: 4.4280
+[2025-09-05 17:15:27] [Rank 0] Group 9 Loss: 4.6034
+[2025-09-05 17:15:27] [Rank 0] Group 10 Loss: 4.7655
+[2025-09-05 17:15:27] [Rank 0] Group 11 Loss: 4.7616
+[2025-09-05 17:15:27] [Rank 0] Group 12 Loss: 4.7140
+[2025-09-05 17:15:27] [Rank 0] Group 13 Loss: 4.7605
+[2025-09-05 17:15:27] [Rank 0] Group 14 Loss: 4.8229
+[2025-09-05 17:15:27] [Rank 0] Group 15 Loss: 4.8242
+[2025-09-05 17:15:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:15:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:15:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:15:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:15:27] [Rank 0] Group 4 FTA: 0.9400
+[2025-09-05 17:15:27] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:15:27] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 17:15:27] [Rank 0] Group 7 FTA: 0.4800
+[2025-09-05 17:15:27] [Rank 0] Group 8 FTA: 0.4800
+[2025-09-05 17:15:27] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 17:15:27] [Rank 0] Group 10 FTA: 0.4600
+[2025-09-05 17:15:27] [Rank 0] Group 11 FTA: 0.3700
+[2025-09-05 17:15:28] [Rank 0] Group 12 FTA: 0.2700
+[2025-09-05 17:15:28] [Rank 0] Group 13 FTA: 0.1600
+[2025-09-05 17:15:28] [Rank 0] Group 14 FTA: 0.1400
+[2025-09-05 17:15:28] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 17:15:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:15:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:15:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:15:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:15:29] [Rank 0] step:7001/10000 train_time:295232ms step_avg:42.17ms
+[2025-09-05 17:15:30] [Rank 0] step:7021/10000 train_time:295910ms step_avg:42.15ms
+[2025-09-05 17:15:30] [Rank 0] step:7041/10000 train_time:296646ms step_avg:42.13ms
+[2025-09-05 17:15:31] [Rank 0] step:7061/10000 train_time:297384ms step_avg:42.12ms
+[2025-09-05 17:15:32] [Rank 0] step:7081/10000 train_time:298120ms step_avg:42.10ms
+[2025-09-05 17:15:33] [Rank 0] step:7101/10000 train_time:298856ms step_avg:42.09ms
+[2025-09-05 17:15:33] [Rank 0] step:7121/10000 train_time:299593ms step_avg:42.07ms
+[2025-09-05 17:15:34] [Rank 0] step:7141/10000 train_time:300329ms step_avg:42.06ms
+[2025-09-05 17:15:35] [Rank 0] step:7161/10000 train_time:301066ms step_avg:42.04ms
+[2025-09-05 17:15:36] [Rank 0] step:7181/10000 train_time:301802ms step_avg:42.03ms
+[2025-09-05 17:15:36] [Rank 0] step:7201/10000 train_time:302538ms step_avg:42.01ms
+[2025-09-05 17:15:37] [Rank 0] step:7221/10000 train_time:303275ms step_avg:42.00ms
+[2025-09-05 17:15:38] [Rank 0] step:7241/10000 train_time:304012ms step_avg:41.98ms
+[2025-09-05 17:15:39] [Rank 0] step:7261/10000 train_time:304749ms step_avg:41.97ms
+[2025-09-05 17:15:39] [Rank 0] step:7281/10000 train_time:305485ms step_avg:41.96ms
+[2025-09-05 17:15:40] [Rank 0] step:7301/10000 train_time:306223ms step_avg:41.94ms
+[2025-09-05 17:15:41] [Rank 0] step:7321/10000 train_time:306959ms step_avg:41.93ms
+[2025-09-05 17:15:41] [Rank 0] step:7341/10000 train_time:307696ms step_avg:41.91ms
+[2025-09-05 17:15:42] [Rank 0] step:7361/10000 train_time:308432ms step_avg:41.90ms
+[2025-09-05 17:15:43] [Rank 0] step:7381/10000 train_time:309168ms step_avg:41.89ms
+[2025-09-05 17:15:44] [Rank 0] step:7401/10000 train_time:309904ms step_avg:41.87ms
+[2025-09-05 17:15:44] [Rank 0] step:7421/10000 train_time:310640ms step_avg:41.86ms
+[2025-09-05 17:15:45] [Rank 0] step:7441/10000 train_time:311376ms step_avg:41.85ms
+[2025-09-05 17:15:46] [Rank 0] step:7461/10000 train_time:312112ms step_avg:41.83ms
+[2025-09-05 17:15:47] [Rank 0] step:7481/10000 train_time:312848ms step_avg:41.82ms
+[2025-09-05 17:15:47] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:15:48] [Rank 0] PRINT: step:7500/10000 train_loss:1.4405 val_loss:1.4325 train_time:313665ms step_avg:41.82ms
+[2025-09-05 17:15:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:15:48] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:17:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:17:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:17:09] [Rank 0] Total Loss: 4.2385
+[2025-09-05 17:17:09] [Rank 0] Total FTA (Unweighted): 0.5619
+[2025-09-05 17:17:09] [Rank 0] Total FTA (Weighted): 0.5619
+[2025-09-05 17:17:09] [Rank 0] Group 0 Loss: 3.4434
+[2025-09-05 17:17:09] [Rank 0] Group 1 Loss: 3.3528
+[2025-09-05 17:17:09] [Rank 0] Group 2 Loss: 3.3255
+[2025-09-05 17:17:09] [Rank 0] Group 3 Loss: 3.5818
+[2025-09-05 17:17:09] [Rank 0] Group 4 Loss: 3.7264
+[2025-09-05 17:17:09] [Rank 0] Group 5 Loss: 3.9331
+[2025-09-05 17:17:09] [Rank 0] Group 6 Loss: 4.0699
+[2025-09-05 17:17:09] [Rank 0] Group 7 Loss: 4.2232
+[2025-09-05 17:17:09] [Rank 0] Group 8 Loss: 4.5238
+[2025-09-05 17:17:09] [Rank 0] Group 9 Loss: 4.6549
+[2025-09-05 17:17:09] [Rank 0] Group 10 Loss: 4.8233
+[2025-09-05 17:17:09] [Rank 0] Group 11 Loss: 4.8164
+[2025-09-05 17:17:09] [Rank 0] Group 12 Loss: 4.7398
+[2025-09-05 17:17:09] [Rank 0] Group 13 Loss: 4.8488
+[2025-09-05 17:17:09] [Rank 0] Group 14 Loss: 4.8801
+[2025-09-05 17:17:09] [Rank 0] Group 15 Loss: 4.8731
+[2025-09-05 17:17:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:17:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:17:09] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:17:09] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:17:09] [Rank 0] Group 4 FTA: 0.9500
+[2025-09-05 17:17:09] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:17:09] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 17:17:09] [Rank 0] Group 7 FTA: 0.4700
+[2025-09-05 17:17:09] [Rank 0] Group 8 FTA: 0.4900
+[2025-09-05 17:17:09] [Rank 0] Group 9 FTA: 0.3800
+[2025-09-05 17:17:09] [Rank 0] Group 10 FTA: 0.4900
+[2025-09-05 17:17:09] [Rank 0] Group 11 FTA: 0.3800
+[2025-09-05 17:17:09] [Rank 0] Group 12 FTA: 0.3100
+[2025-09-05 17:17:09] [Rank 0] Group 13 FTA: 0.2200
+[2025-09-05 17:17:09] [Rank 0] Group 14 FTA: 0.1500
+[2025-09-05 17:17:09] [Rank 0] Group 15 FTA: 0.0700
+[2025-09-05 17:17:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:17:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:17:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:17:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:17:11] [Rank 0] step:7501/10000 train_time:313674ms step_avg:41.82ms
+[2025-09-05 17:17:11] [Rank 0] step:7521/10000 train_time:314336ms step_avg:41.79ms
+[2025-09-05 17:17:12] [Rank 0] step:7541/10000 train_time:315072ms step_avg:41.78ms
+[2025-09-05 17:17:13] [Rank 0] step:7561/10000 train_time:315807ms step_avg:41.77ms
+[2025-09-05 17:17:13] [Rank 0] step:7581/10000 train_time:316544ms step_avg:41.75ms
+[2025-09-05 17:17:14] [Rank 0] step:7601/10000 train_time:317279ms step_avg:41.74ms
+[2025-09-05 17:17:15] [Rank 0] step:7621/10000 train_time:318016ms step_avg:41.73ms
+[2025-09-05 17:17:16] [Rank 0] step:7641/10000 train_time:318976ms step_avg:41.75ms
+[2025-09-05 17:17:17] [Rank 0] step:7661/10000 train_time:320100ms step_avg:41.78ms
+[2025-09-05 17:17:18] [Rank 0] step:7681/10000 train_time:320836ms step_avg:41.77ms
+[2025-09-05 17:17:19] [Rank 0] step:7701/10000 train_time:321716ms step_avg:41.78ms
+[2025-09-05 17:17:19] [Rank 0] step:7721/10000 train_time:322477ms step_avg:41.77ms
+[2025-09-05 17:17:20] [Rank 0] step:7741/10000 train_time:323213ms step_avg:41.75ms
+[2025-09-05 17:17:21] [Rank 0] step:7761/10000 train_time:323949ms step_avg:41.74ms
+[2025-09-05 17:17:22] [Rank 0] step:7781/10000 train_time:324685ms step_avg:41.73ms
+[2025-09-05 17:17:22] [Rank 0] step:7801/10000 train_time:325421ms step_avg:41.72ms
+[2025-09-05 17:17:23] [Rank 0] step:7821/10000 train_time:326157ms step_avg:41.70ms
+[2025-09-05 17:17:24] [Rank 0] step:7841/10000 train_time:326893ms step_avg:41.69ms
+[2025-09-05 17:17:25] [Rank 0] step:7861/10000 train_time:327628ms step_avg:41.68ms
+[2025-09-05 17:17:25] [Rank 0] step:7881/10000 train_time:328364ms step_avg:41.67ms
+[2025-09-05 17:17:26] [Rank 0] step:7901/10000 train_time:329100ms step_avg:41.65ms
+[2025-09-05 17:17:27] [Rank 0] step:7921/10000 train_time:329836ms step_avg:41.64ms
+[2025-09-05 17:17:28] [Rank 0] step:7941/10000 train_time:330571ms step_avg:41.63ms
+[2025-09-05 17:17:28] [Rank 0] step:7961/10000 train_time:331308ms step_avg:41.62ms
+[2025-09-05 17:17:29] [Rank 0] step:7981/10000 train_time:332043ms step_avg:41.60ms
+[2025-09-05 17:17:30] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:17:30] [Rank 0] PRINT: step:8000/10000 train_loss:1.4400 val_loss:1.4305 train_time:332860ms step_avg:41.61ms
+[2025-09-05 17:17:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:17:30] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:18:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:18:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:18:51] [Rank 0] Total Loss: 4.1587
+[2025-09-05 17:18:51] [Rank 0] Total FTA (Unweighted): 0.5631
+[2025-09-05 17:18:51] [Rank 0] Total FTA (Weighted): 0.5631
+[2025-09-05 17:18:51] [Rank 0] Group 0 Loss: 3.4222
+[2025-09-05 17:18:51] [Rank 0] Group 1 Loss: 3.3239
+[2025-09-05 17:18:51] [Rank 0] Group 2 Loss: 3.1423
+[2025-09-05 17:18:51] [Rank 0] Group 3 Loss: 3.5276
+[2025-09-05 17:18:51] [Rank 0] Group 4 Loss: 3.6582
+[2025-09-05 17:18:51] [Rank 0] Group 5 Loss: 3.8784
+[2025-09-05 17:18:51] [Rank 0] Group 6 Loss: 3.9702
+[2025-09-05 17:18:51] [Rank 0] Group 7 Loss: 4.1402
+[2025-09-05 17:18:51] [Rank 0] Group 8 Loss: 4.4464
+[2025-09-05 17:18:51] [Rank 0] Group 9 Loss: 4.5595
+[2025-09-05 17:18:51] [Rank 0] Group 10 Loss: 4.7377
+[2025-09-05 17:18:51] [Rank 0] Group 11 Loss: 4.7277
+[2025-09-05 17:18:51] [Rank 0] Group 12 Loss: 4.6767
+[2025-09-05 17:18:51] [Rank 0] Group 13 Loss: 4.7583
+[2025-09-05 17:18:51] [Rank 0] Group 14 Loss: 4.7820
+[2025-09-05 17:18:51] [Rank 0] Group 15 Loss: 4.7884
+[2025-09-05 17:18:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:18:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:18:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:18:51] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:18:51] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 17:18:51] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:18:51] [Rank 0] Group 6 FTA: 0.5100
+[2025-09-05 17:18:51] [Rank 0] Group 7 FTA: 0.4700
+[2025-09-05 17:18:51] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 17:18:51] [Rank 0] Group 9 FTA: 0.4000
+[2025-09-05 17:18:51] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 17:18:51] [Rank 0] Group 11 FTA: 0.3400
+[2025-09-05 17:18:51] [Rank 0] Group 12 FTA: 0.3400
+[2025-09-05 17:18:51] [Rank 0] Group 13 FTA: 0.2100
+[2025-09-05 17:18:51] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 17:18:51] [Rank 0] Group 15 FTA: 0.0800
+[2025-09-05 17:18:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:18:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:18:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:18:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:18:52] [Rank 0] step:8001/10000 train_time:332870ms step_avg:41.60ms
+[2025-09-05 17:18:54] [Rank 0] step:8021/10000 train_time:334142ms step_avg:41.66ms
+[2025-09-05 17:18:54] [Rank 0] step:8041/10000 train_time:334877ms step_avg:41.65ms
+[2025-09-05 17:18:55] [Rank 0] step:8061/10000 train_time:335613ms step_avg:41.63ms
+[2025-09-05 17:18:56] [Rank 0] step:8081/10000 train_time:336349ms step_avg:41.62ms
+[2025-09-05 17:18:57] [Rank 0] step:8101/10000 train_time:337085ms step_avg:41.61ms
+[2025-09-05 17:18:57] [Rank 0] step:8121/10000 train_time:337821ms step_avg:41.60ms
+[2025-09-05 17:18:58] [Rank 0] step:8141/10000 train_time:338557ms step_avg:41.59ms
+[2025-09-05 17:18:59] [Rank 0] step:8161/10000 train_time:339293ms step_avg:41.57ms
+[2025-09-05 17:19:00] [Rank 0] step:8181/10000 train_time:340029ms step_avg:41.56ms
+[2025-09-05 17:19:00] [Rank 0] step:8201/10000 train_time:340765ms step_avg:41.55ms
+[2025-09-05 17:19:01] [Rank 0] step:8221/10000 train_time:341501ms step_avg:41.54ms
+[2025-09-05 17:19:02] [Rank 0] step:8241/10000 train_time:342237ms step_avg:41.53ms
+[2025-09-05 17:19:03] [Rank 0] step:8261/10000 train_time:342973ms step_avg:41.52ms
+[2025-09-05 17:19:03] [Rank 0] step:8281/10000 train_time:343709ms step_avg:41.51ms
+[2025-09-05 17:19:04] [Rank 0] step:8301/10000 train_time:344445ms step_avg:41.49ms
+[2025-09-05 17:19:05] [Rank 0] step:8321/10000 train_time:345181ms step_avg:41.48ms
+[2025-09-05 17:19:06] [Rank 0] step:8341/10000 train_time:345917ms step_avg:41.47ms
+[2025-09-05 17:19:06] [Rank 0] step:8361/10000 train_time:346654ms step_avg:41.46ms
+[2025-09-05 17:19:07] [Rank 0] step:8381/10000 train_time:347389ms step_avg:41.45ms
+[2025-09-05 17:19:08] [Rank 0] step:8401/10000 train_time:348126ms step_avg:41.44ms
+[2025-09-05 17:19:08] [Rank 0] step:8421/10000 train_time:348862ms step_avg:41.43ms
+[2025-09-05 17:19:09] [Rank 0] step:8441/10000 train_time:349599ms step_avg:41.42ms
+[2025-09-05 17:19:10] [Rank 0] step:8461/10000 train_time:350335ms step_avg:41.41ms
+[2025-09-05 17:19:11] [Rank 0] step:8481/10000 train_time:351071ms step_avg:41.39ms
+[2025-09-05 17:19:11] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:19:12] [Rank 0] PRINT: step:8500/10000 train_loss:1.4379 val_loss:1.4270 train_time:351887ms step_avg:41.40ms +[2025-09-05 17:19:12] [Rank 0] PRINT: step:8500/10000 train_loss:1.4379 val_loss:1.4270 train_time:351887ms step_avg:41.40ms +[2025-09-05 17:19:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:19:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-09-05 17:19:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:19:12] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples. +[2025-09-05 17:20:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:20:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-09-05 17:20:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:20:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-09-05 17:20:32] [Rank 0] Total Loss: 4.2190 +[2025-09-05 17:20:32] [Rank 0] Total Loss: 4.2190 +[2025-09-05 17:20:32] [Rank 0] Total FTA (Unweighted): 0.5825 +[2025-09-05 17:20:32] [Rank 0] Total FTA (Unweighted): 0.5825 +[2025-09-05 17:20:32] [Rank 0] Total FTA (Weighted): 0.5825 +[2025-09-05 17:20:32] [Rank 0] Total FTA (Weighted): 0.5825 +[2025-09-05 17:20:32] [Rank 0] Group 0 Loss: 3.4893 +[2025-09-05 17:20:32] [Rank 0] Group 0 Loss: 3.4893 +[2025-09-05 17:20:32] [Rank 0] Group 1 Loss: 3.3374 +[2025-09-05 17:20:32] [Rank 0] Group 1 Loss: 3.3374 +[2025-09-05 17:20:32] [Rank 0] Group 2 Loss: 3.2135 +[2025-09-05 17:20:32] [Rank 0] Group 2 Loss: 3.2135 +[2025-09-05 17:20:32] [Rank 0] Group 3 Loss: 3.6085 +[2025-09-05 17:20:32] [Rank 0] Group 3 Loss: 3.6085 +[2025-09-05 17:20:32] [Rank 0] Group 4 Loss: 3.7161 +[2025-09-05 17:20:32] [Rank 0] Group 4 Loss: 3.7161 +[2025-09-05 17:20:32] [Rank 0] Group 5 Loss: 3.9463 +[2025-09-05 17:20:32] [Rank 0] Group 5 Loss: 3.9463 +[2025-09-05 17:20:32] [Rank 0] Group 6 Loss: 4.0101 +[2025-09-05 17:20:32] [Rank 0] Group 6 Loss: 4.0101 +[2025-09-05 17:20:32] [Rank 0] Group 7 Loss: 4.1847 +[2025-09-05 17:20:32] [Rank 0] Group 7 Loss: 4.1847 +[2025-09-05 17:20:32] [Rank 0] Group 8 Loss: 4.5090 +[2025-09-05 17:20:32] [Rank 0] Group 8 Loss: 4.5090 +[2025-09-05 17:20:32] [Rank 0] Group 9 Loss: 4.6218 +[2025-09-05 17:20:32] [Rank 0] Group 9 Loss: 4.6218 +[2025-09-05 17:20:32] [Rank 0] Group 10 Loss: 4.8088 +[2025-09-05 17:20:32] [Rank 0] Group 10 Loss: 4.8088 +[2025-09-05 17:20:32] [Rank 0] Group 11 Loss: 4.8096 +[2025-09-05 17:20:32] [Rank 0] Group 11 Loss: 4.8096 +[2025-09-05 17:20:32] [Rank 0] Group 12 Loss: 4.7362 +[2025-09-05 17:20:32] [Rank 0] Group 12 Loss: 4.7362 +[2025-09-05 17:20:32] [Rank 0] Group 13 Loss: 4.8214 +[2025-09-05 17:20:32] [Rank 0] Group 13 Loss: 4.8214 +[2025-09-05 17:20:32] [Rank 0] Group 14 Loss: 4.8597 +[2025-09-05 17:20:32] [Rank 0] Group 14 Loss: 4.8597 +[2025-09-05 17:20:32] [Rank 0] Group 15 Loss: 4.8315 +[2025-09-05 17:20:32] [Rank 0] Group 15 Loss: 4.8315 +[2025-09-05 17:20:32] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:20:32] [Rank 0] Group 0 FTA: 1.0000 +[2025-09-05 17:20:33] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:20:33] [Rank 0] Group 1 FTA: 1.0000 +[2025-09-05 17:20:33] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:20:33] [Rank 0] Group 2 FTA: 1.0000 +[2025-09-05 17:20:33] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:20:33] [Rank 0] Group 3 FTA: 1.0000 +[2025-09-05 17:20:33] [Rank 0] Group 4 FTA: 0.9700 +[2025-09-05 17:20:33] [Rank 0] Group 4 FTA: 0.9700 +[2025-09-05 17:20:33] [Rank 0] Group 5 FTA: 0.5700 +[2025-09-05 17:20:33] [Rank 0] Group 5 FTA: 
0.5700 +[2025-09-05 17:20:33] [Rank 0] Group 6 FTA: 0.5200 +[2025-09-05 17:20:33] [Rank 0] Group 6 FTA: 0.5200 +[2025-09-05 17:20:33] [Rank 0] Group 7 FTA: 0.4900 +[2025-09-05 17:20:33] [Rank 0] Group 7 FTA: 0.4900 +[2025-09-05 17:20:33] [Rank 0] Group 8 FTA: 0.5100 +[2025-09-05 17:20:33] [Rank 0] Group 8 FTA: 0.5100 +[2025-09-05 17:20:33] [Rank 0] Group 9 FTA: 0.4000 +[2025-09-05 17:20:33] [Rank 0] Group 9 FTA: 0.4000 +[2025-09-05 17:20:33] [Rank 0] Group 10 FTA: 0.5100 +[2025-09-05 17:20:33] [Rank 0] Group 10 FTA: 0.5100 +[2025-09-05 17:20:33] [Rank 0] Group 11 FTA: 0.4000 +[2025-09-05 17:20:33] [Rank 0] Group 11 FTA: 0.4000 +[2025-09-05 17:20:33] [Rank 0] Group 12 FTA: 0.4000 +[2025-09-05 17:20:33] [Rank 0] Group 12 FTA: 0.4000 +[2025-09-05 17:20:33] [Rank 0] Group 13 FTA: 0.2800 +[2025-09-05 17:20:33] [Rank 0] Group 13 FTA: 0.2800 +[2025-09-05 17:20:33] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 17:20:33] [Rank 0] Group 14 FTA: 0.1400 +[2025-09-05 17:20:33] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 17:20:33] [Rank 0] Group 15 FTA: 0.1300 +[2025-09-05 17:20:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png +[2025-09-05 17:20:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png +[2025-09-05 17:20:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png +[2025-09-05 17:20:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png +[2025-09-05 17:20:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png +[2025-09-05 17:20:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png +[2025-09-05 17:20:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png +[2025-09-05 17:20:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png +[2025-09-05 17:20:34] [Rank 0] step:8501/10000 train_time:351899ms step_avg:41.40ms +[2025-09-05 17:20:34] [Rank 0] step:8501/10000 train_time:351899ms step_avg:41.40ms +[2025-09-05 17:20:35] [Rank 0] step:8521/10000 train_time:352560ms step_avg:41.38ms +[2025-09-05 17:20:35] [Rank 0] step:8521/10000 train_time:352560ms step_avg:41.38ms +[2025-09-05 17:20:36] [Rank 0] step:8541/10000 train_time:353296ms step_avg:41.36ms +[2025-09-05 17:20:36] [Rank 0] step:8541/10000 train_time:353296ms step_avg:41.36ms +[2025-09-05 17:20:36] [Rank 0] step:8561/10000 train_time:354033ms step_avg:41.35ms +[2025-09-05 17:20:36] [Rank 0] step:8561/10000 train_time:354033ms step_avg:41.35ms +[2025-09-05 17:20:37] [Rank 0] step:8581/10000 train_time:354769ms step_avg:41.34ms +[2025-09-05 17:20:37] [Rank 0] step:8581/10000 train_time:354769ms step_avg:41.34ms +[2025-09-05 17:20:38] [Rank 0] step:8601/10000 train_time:355505ms step_avg:41.33ms +[2025-09-05 17:20:38] [Rank 0] step:8601/10000 train_time:355505ms step_avg:41.33ms +[2025-09-05 17:20:39] [Rank 0] step:8621/10000 train_time:356241ms step_avg:41.32ms +[2025-09-05 17:20:39] 
+[2025-09-05 17:20:39] [Rank 0] step:8641/10000 train_time:356978ms step_avg:41.31ms
+[2025-09-05 17:20:40] [Rank 0] step:8661/10000 train_time:357714ms step_avg:41.30ms
+[2025-09-05 17:20:41] [Rank 0] step:8681/10000 train_time:358450ms step_avg:41.29ms
+[2025-09-05 17:20:42] [Rank 0] step:8701/10000 train_time:359185ms step_avg:41.28ms
+[2025-09-05 17:20:42] [Rank 0] step:8721/10000 train_time:359921ms step_avg:41.27ms
+[2025-09-05 17:20:43] [Rank 0] step:8741/10000 train_time:360658ms step_avg:41.26ms
+[2025-09-05 17:20:44] [Rank 0] step:8761/10000 train_time:361393ms step_avg:41.25ms
+[2025-09-05 17:20:44] [Rank 0] step:8781/10000 train_time:362129ms step_avg:41.24ms
+[2025-09-05 17:20:45] [Rank 0] step:8801/10000 train_time:362865ms step_avg:41.23ms
+[2025-09-05 17:20:46] [Rank 0] step:8821/10000 train_time:363702ms step_avg:41.23ms
+[2025-09-05 17:20:47] [Rank 0] step:8841/10000 train_time:365057ms step_avg:41.29ms
+[2025-09-05 17:20:48] [Rank 0] step:8861/10000 train_time:365895ms step_avg:41.29ms
+[2025-09-05 17:20:49] [Rank 0] step:8881/10000 train_time:366636ms step_avg:41.28ms
+[2025-09-05 17:20:50] [Rank 0] step:8901/10000 train_time:367372ms step_avg:41.27ms
+[2025-09-05 17:20:50] [Rank 0] step:8921/10000 train_time:368107ms step_avg:41.26ms
+[2025-09-05 17:20:51] [Rank 0] step:8941/10000 train_time:368845ms step_avg:41.25ms
+[2025-09-05 17:20:52] [Rank 0] step:8961/10000 train_time:369582ms step_avg:41.24ms
+[2025-09-05 17:20:53] [Rank 0] step:8981/10000 train_time:370318ms step_avg:41.23ms
+[2025-09-05 17:20:53] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
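The warning above is pure integer arithmetic: 491520 / 65536 = 7.5, so only whole batches fit. A minimal sketch of the check, assuming the validation loop consumes tokens in full batches via floor division (the loop itself is not part of this log):

```python
# Why the warning fires: only whole validation batches are evaluated.
# This is a sketch under that assumption, not the run's actual code.
val_tokens = 491_520
val_batch_size = 65_536

num_batches = val_tokens // val_batch_size   # 7 full batches
consumed = num_batches * val_batch_size      # 458,752 tokens evaluated
missed = val_tokens - consumed               # 32,768 tokens skipped

if val_tokens % val_batch_size != 0:
    print(f"Warning: {missed} of {val_tokens} val tokens missed "
          f"({num_batches} batches of {val_batch_size})")
```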
+[2025-09-05 17:20:54] [Rank 0] PRINT: step:9000/10000 train_loss:1.4334 val_loss:1.4227 train_time:371134ms step_avg:41.24ms
+[2025-09-05 17:20:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:20:54] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:22:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:22:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:22:15] [Rank 0] Total Loss: 4.2294
+[2025-09-05 17:22:15] [Rank 0] Total FTA (Unweighted): 0.5794
+[2025-09-05 17:22:15] [Rank 0] Total FTA (Weighted): 0.5794
+[2025-09-05 17:22:15] [Rank 0] Group 0 Loss: 3.5891
+[2025-09-05 17:22:15] [Rank 0] Group 1 Loss: 3.3358
+[2025-09-05 17:22:15] [Rank 0] Group 2 Loss: 3.2163
+[2025-09-05 17:22:15] [Rank 0] Group 3 Loss: 3.6224
+[2025-09-05 17:22:15] [Rank 0] Group 4 Loss: 3.7368
+[2025-09-05 17:22:15] [Rank 0] Group 5 Loss: 3.9498
+[2025-09-05 17:22:15] [Rank 0] Group 6 Loss: 4.0248
+[2025-09-05 17:22:15] [Rank 0] Group 7 Loss: 4.1904
+[2025-09-05 17:22:15] [Rank 0] Group 8 Loss: 4.4930
+[2025-09-05 17:22:15] [Rank 0] Group 9 Loss: 4.6319
+[2025-09-05 17:22:15] [Rank 0] Group 10 Loss: 4.7835
+[2025-09-05 17:22:15] [Rank 0] Group 11 Loss: 4.8063
+[2025-09-05 17:22:15] [Rank 0] Group 12 Loss: 4.7402
+[2025-09-05 17:22:15] [Rank 0] Group 13 Loss: 4.8398
+[2025-09-05 17:22:15] [Rank 0] Group 14 Loss: 4.8785
+[2025-09-05 17:22:15] [Rank 0] Group 15 Loss: 4.8317
+[2025-09-05 17:22:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:22:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:22:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:22:15] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:22:15] [Rank 0] Group 4 FTA: 0.9600
+[2025-09-05 17:22:15] [Rank 0] Group 5 FTA: 0.5900
+[2025-09-05 17:22:15] [Rank 0] Group 6 FTA: 0.5300
+[2025-09-05 17:22:15] [Rank 0] Group 7 FTA: 0.5100
+[2025-09-05 17:22:16] [Rank 0] Group 8 FTA: 0.5000
+[2025-09-05 17:22:16] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 17:22:16] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 17:22:16] [Rank 0] Group 11 FTA: 0.3800
+[2025-09-05 17:22:16] [Rank 0] Group 12 FTA: 0.3700
+[2025-09-05 17:22:16] [Rank 0] Group 13 FTA: 0.2400
+[2025-09-05 17:22:16] [Rank 0] Group 14 FTA: 0.1600
+[2025-09-05 17:22:16] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 17:22:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:22:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:22:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:22:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
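The Total FTA (Unweighted) of 0.5794 reported for this step is consistent with a plain mean of the 16 per-group FTAs above, and the weighted variant coincides with it because the fixed eval set holds 100 samples per group (1600 in total). A hypothetical recomputation under that equal-group-size reading:

```python
# Recomputing the step-9000 summary from the per-group FTAs logged above.
# The 100-samples-per-group weighting is inferred from the 1600-sample
# fixed-eval set, not taken from the run's code.
group_fta = [1.00, 1.00, 1.00, 1.00, 0.96, 0.59, 0.53, 0.51,
             0.50, 0.42, 0.50, 0.38, 0.37, 0.24, 0.16, 0.11]
group_sizes = [100] * 16  # 16 groups x 100 samples = 1600

unweighted = sum(group_fta) / len(group_fta)
weighted = sum(f * n for f, n in zip(group_fta, group_sizes)) / sum(group_sizes)

print(f"unweighted={unweighted:.4f} weighted={weighted:.4f}")  # both 0.5794
```

With equal group sizes the two averages are identical by construction, which matches the log; they would diverge only if the per-group sample counts differed.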
+[2025-09-05 17:22:17] [Rank 0] step:9001/10000 train_time:371145ms step_avg:41.23ms
+[2025-09-05 17:22:18] [Rank 0] step:9021/10000 train_time:371808ms step_avg:41.22ms
+[2025-09-05 17:22:18] [Rank 0] step:9041/10000 train_time:372545ms step_avg:41.21ms
+[2025-09-05 17:22:19] [Rank 0] step:9061/10000 train_time:373280ms step_avg:41.20ms
+[2025-09-05 17:22:20] [Rank 0] step:9081/10000 train_time:374016ms step_avg:41.19ms
+[2025-09-05 17:22:21] [Rank 0] step:9101/10000 train_time:374752ms step_avg:41.18ms
+[2025-09-05 17:22:21] [Rank 0] step:9121/10000 train_time:375488ms step_avg:41.17ms
+[2025-09-05 17:22:22] [Rank 0] step:9141/10000 train_time:376224ms step_avg:41.16ms
+[2025-09-05 17:22:23] [Rank 0] step:9161/10000 train_time:376961ms step_avg:41.15ms
+[2025-09-05 17:22:23] [Rank 0] step:9181/10000 train_time:377696ms step_avg:41.14ms
+[2025-09-05 17:22:24] [Rank 0] step:9201/10000 train_time:378433ms step_avg:41.13ms
+[2025-09-05 17:22:25] [Rank 0] step:9221/10000 train_time:379169ms step_avg:41.12ms
+[2025-09-05 17:22:26] [Rank 0] step:9241/10000 train_time:379905ms step_avg:41.11ms
+[2025-09-05 17:22:26] [Rank 0] step:9261/10000 train_time:380641ms step_avg:41.10ms
+[2025-09-05 17:22:27] [Rank 0] step:9281/10000 train_time:381377ms step_avg:41.09ms
+[2025-09-05 17:22:28] [Rank 0] step:9301/10000 train_time:382114ms step_avg:41.08ms
+[2025-09-05 17:22:29] [Rank 0] step:9321/10000 train_time:382850ms step_avg:41.07ms
+[2025-09-05 17:22:29] [Rank 0] step:9341/10000 train_time:383586ms step_avg:41.06ms
+[2025-09-05 17:22:30] [Rank 0] step:9361/10000 train_time:384323ms step_avg:41.06ms
+[2025-09-05 17:22:31] [Rank 0] step:9381/10000 train_time:385059ms step_avg:41.05ms
+[2025-09-05 17:22:32] [Rank 0] step:9401/10000 train_time:385795ms step_avg:41.04ms
+[2025-09-05 17:22:32] [Rank 0] step:9421/10000 train_time:386531ms step_avg:41.03ms
+[2025-09-05 17:22:33] [Rank 0] step:9441/10000 train_time:387412ms step_avg:41.04ms
+[2025-09-05 17:22:34] [Rank 0] step:9461/10000 train_time:388149ms step_avg:41.03ms
+[2025-09-05 17:22:35] [Rank 0] step:9481/10000 train_time:388886ms step_avg:41.02ms
+[2025-09-05 17:22:36] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:22:36] [Rank 0] PRINT: step:9500/10000 train_loss:1.4275 val_loss:1.4159 train_time:389842ms step_avg:41.04ms
+[2025-09-05 17:22:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:22:36] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:23:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:23:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:23:57] [Rank 0] Total Loss: 4.2019
+[2025-09-05 17:23:57] [Rank 0] Total FTA (Unweighted): 0.5894
+[2025-09-05 17:23:57] [Rank 0] Total FTA (Weighted): 0.5894
+[2025-09-05 17:23:57] [Rank 0] Group 0 Loss: 3.5250
+[2025-09-05 17:23:57] [Rank 0] Group 1 Loss: 3.3089
+[2025-09-05 17:23:57] [Rank 0] Group 2 Loss: 3.1859
+[2025-09-05 17:23:57] [Rank 0] Group 3 Loss: 3.5827
+[2025-09-05 17:23:57] [Rank 0] Group 4 Loss: 3.7101
+[2025-09-05 17:23:57] [Rank 0] Group 5 Loss: 3.9334
+[2025-09-05 17:23:57] [Rank 0] Group 6 Loss: 4.0402
+[2025-09-05 17:23:57] [Rank 0] Group 7 Loss: 4.1731
+[2025-09-05 17:23:57] [Rank 0] Group 8 Loss: 4.4703
+[2025-09-05 17:23:57] [Rank 0] Group 9 Loss: 4.6006
+[2025-09-05 17:23:57] [Rank 0] Group 10 Loss: 4.7677
+[2025-09-05 17:23:57] [Rank 0] Group 11 Loss: 4.7999
+[2025-09-05 17:23:57] [Rank 0] Group 12 Loss: 4.7230
+[2025-09-05 17:23:57] [Rank 0] Group 13 Loss: 4.7950
+[2025-09-05 17:23:57] [Rank 0] Group 14 Loss: 4.8176
+[2025-09-05 17:23:57] [Rank 0] Group 15 Loss: 4.7975
+[2025-09-05 17:23:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:23:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:23:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:23:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:23:57] [Rank 0] Group 4 FTA: 0.9300
+[2025-09-05 17:23:57] [Rank 0] Group 5 FTA: 0.5800
+[2025-09-05 17:23:57] [Rank 0] Group 6 FTA: 0.5500
+[2025-09-05 17:23:57] [Rank 0] Group 7 FTA: 0.5100
+[2025-09-05 17:23:57] [Rank 0] Group 8 FTA: 0.5100
+[2025-09-05 17:23:57] [Rank 0] Group 9 FTA: 0.4100
+[2025-09-05 17:23:57] [Rank 0] Group 10 FTA: 0.5100
+[2025-09-05 17:23:57] [Rank 0] Group 11 FTA: 0.4100
+[2025-09-05 17:23:57] [Rank 0] Group 12 FTA: 0.4400
+[2025-09-05 17:23:57] [Rank 0] Group 13 FTA: 0.2900
+[2025-09-05 17:23:57] [Rank 0] Group 14 FTA: 0.1800
+[2025-09-05 17:23:57] [Rank 0] Group 15 FTA: 0.1100
+[2025-09-05 17:23:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:23:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:23:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:23:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:23:59] [Rank 0] step:9501/10000 train_time:389853ms step_avg:41.03ms
+[2025-09-05 17:24:00] [Rank 0] step:9521/10000 train_time:390523ms step_avg:41.02ms
+[2025-09-05 17:24:01] [Rank 0] step:9541/10000 train_time:391261ms step_avg:41.01ms
+[2025-09-05 17:24:01] [Rank 0] step:9561/10000 train_time:391997ms step_avg:41.00ms
+[2025-09-05 17:24:02] [Rank 0] step:9581/10000 train_time:392734ms step_avg:40.99ms
+[2025-09-05 17:24:03] [Rank 0] step:9601/10000 train_time:393470ms step_avg:40.98ms
+[2025-09-05 17:24:04] [Rank 0] step:9621/10000 train_time:394207ms step_avg:40.97ms
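The step_avg field in these heartbeat lines is consistent with cumulative train_time divided by the current step; that definition is an inference from the logged numbers, not from the script. A one-line check against the step:9621 entry above:

```python
# Reconstructing step_avg from the step:9621 entry; the formula
# (cumulative train_time / step) is inferred from the values, not the code.
train_time_ms = 394_207   # cumulative training time at step 9621
step = 9_621

step_avg_ms = train_time_ms / step
print(f"step_avg:{step_avg_ms:.2f}ms")  # 40.97ms, matching the log line
```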
+[2025-09-05 17:24:04] [Rank 0] step:9641/10000 train_time:394943ms step_avg:40.96ms
+[2025-09-05 17:24:05] [Rank 0] step:9661/10000 train_time:395957ms step_avg:40.99ms
+[2025-09-05 17:24:06] [Rank 0] step:9681/10000 train_time:396694ms step_avg:40.98ms
+[2025-09-05 17:24:07] [Rank 0] step:9701/10000 train_time:397430ms step_avg:40.97ms
+[2025-09-05 17:24:08] [Rank 0] step:9721/10000 train_time:398166ms step_avg:40.96ms
+[2025-09-05 17:24:08] [Rank 0] step:9741/10000 train_time:398902ms step_avg:40.95ms
+[2025-09-05 17:24:09] [Rank 0] step:9761/10000 train_time:399638ms step_avg:40.94ms
+[2025-09-05 17:24:10] [Rank 0] step:9781/10000 train_time:400375ms step_avg:40.93ms
+[2025-09-05 17:24:10] [Rank 0] step:9801/10000 train_time:401111ms step_avg:40.93ms
+[2025-09-05 17:24:11] [Rank 0] step:9821/10000 train_time:401847ms step_avg:40.92ms
+[2025-09-05 17:24:12] [Rank 0] step:9841/10000 train_time:402583ms step_avg:40.91ms
+[2025-09-05 17:24:13] [Rank 0] step:9861/10000 train_time:403319ms step_avg:40.90ms
+[2025-09-05 17:24:13] [Rank 0] step:9881/10000 train_time:404055ms step_avg:40.89ms
+[2025-09-05 17:24:14] [Rank 0] step:9901/10000 train_time:404791ms step_avg:40.88ms
+[2025-09-05 17:24:15] [Rank 0] step:9921/10000 train_time:405528ms step_avg:40.88ms
+[2025-09-05 17:24:16] [Rank 0] step:9941/10000 train_time:406264ms step_avg:40.87ms
+[2025-09-05 17:24:16] [Rank 0] step:9961/10000 train_time:407000ms step_avg:40.86ms
+[2025-09-05 17:24:17] [Rank 0] step:9981/10000 train_time:407736ms step_avg:40.85ms
+[2025-09-05 17:24:18] [Rank 0] step:10000/10000 train_time:408436ms step_avg:40.84ms
+[2025-09-05 17:24:18] [Rank 0] PRINT: Warning: val_tokens (491520) not perfectly divisible by val_batch_size (65536). Some tokens might be missed.
+[2025-09-05 17:24:18] [Rank 0] PRINT: step:10000/10000 train_loss:1.4210 val_loss:1.4091 train_time:408558ms step_avg:40.86ms
+[2025-09-05 17:24:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-09-05 17:24:18] [Rank 0] PRINT: Fixed-eval set loaded with 1600 samples.
+[2025-09-05 17:25:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-09-05 17:25:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-09-05 17:25:40] [Rank 0] Total Loss: 4.2032
+[2025-09-05 17:25:40] [Rank 0] Total FTA (Unweighted): 0.5938
+[2025-09-05 17:25:40] [Rank 0] Total FTA (Weighted): 0.5938
+[2025-09-05 17:25:40] [Rank 0] Group 0 Loss: 3.5205
+[2025-09-05 17:25:40] [Rank 0] Group 1 Loss: 3.3306
+[2025-09-05 17:25:40] [Rank 0] Group 2 Loss: 3.1532
+[2025-09-05 17:25:40] [Rank 0] Group 3 Loss: 3.6129
+[2025-09-05 17:25:40] [Rank 0] Group 4 Loss: 3.7294
+[2025-09-05 17:25:40] [Rank 0] Group 5 Loss: 3.9241
+[2025-09-05 17:25:40] [Rank 0] Group 6 Loss: 4.0215
+[2025-09-05 17:25:40] [Rank 0] Group 7 Loss: 4.1726
+[2025-09-05 17:25:40] [Rank 0] Group 8 Loss: 4.4735
+[2025-09-05 17:25:40] [Rank 0] Group 9 Loss: 4.6006
+[2025-09-05 17:25:40] [Rank 0] Group 10 Loss: 4.7813
+[2025-09-05 17:25:40] [Rank 0] Group 11 Loss: 4.7879
+[2025-09-05 17:25:40] [Rank 0] Group 12 Loss: 4.7187
+[2025-09-05 17:25:40] [Rank 0] Group 13 Loss: 4.7948
+[2025-09-05 17:25:40] [Rank 0] Group 14 Loss: 4.8146
+[2025-09-05 17:25:40] [Rank 0] Group 15 Loss: 4.8158
+[2025-09-05 17:25:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-09-05 17:25:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-09-05 17:25:40] [Rank 0] Group 2 FTA: 1.0000
+[2025-09-05 17:25:40] [Rank 0] Group 3 FTA: 1.0000
+[2025-09-05 17:25:40] [Rank 0] Group 4 FTA: 0.9600
+[2025-09-05 17:25:40] [Rank 0] Group 5 FTA: 0.5700
+[2025-09-05 17:25:40] [Rank 0] Group 6 FTA: 0.5200
+[2025-09-05 17:25:40] [Rank 0] Group 7 FTA: 0.5300
+[2025-09-05 17:25:40] [Rank 0] Group 8 FTA: 0.5200
+[2025-09-05 17:25:40] [Rank 0] Group 9 FTA: 0.4200
+[2025-09-05 17:25:40] [Rank 0] Group 10 FTA: 0.5000
+[2025-09-05 17:25:40] [Rank 0] Group 11 FTA: 0.4000
+[2025-09-05 17:25:40] [Rank 0] Group 12 FTA: 0.4500
+[2025-09-05 17:25:40] [Rank 0] Group 13 FTA: 0.3000
+[2025-09-05 17:25:40] [Rank 0] Group 14 FTA: 0.2100
+[2025-09-05 17:25:40] [Rank 0] Group 15 FTA: 0.1200
+[2025-09-05 17:25:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_loss_curves.png
+[2025-09-05 17:25:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/per_class_acc_curves.png
+[2025-09-05 17:25:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_loss_curve.png
+[2025-09-05 17:25:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_qa_sgd_gated/lr_search_long/mode_9_param_gated_lr_0.5_seed_46/total_acc_curve.png
+[2025-09-05 17:25:41] [Rank 0] step:10001/10000 train_time:408568ms step_avg:40.85ms
+[2025-09-05 17:25:41] [Rank 0] PRINT: --- Training Finished: Fri Sep 5 17:25:41 2025 ---
+[2025-09-05 17:25:41] [Rank 0] PRINT: Peak memory allocated: 3620 MiB reserved: 4788 MiB
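The closing peak-memory line (3620 MiB allocated, 4788 MiB reserved) matches the shape of PyTorch's CUDA allocator counters. A sketch of how such a line is typically produced; torch.cuda.max_memory_allocated and torch.cuda.max_memory_reserved are standard PyTorch calls, but this snippet is an assumption about the logging code, not the run's actual implementation:

```python
# Sketch: reporting peak CUDA memory at the end of training.
# max_memory_allocated = high-water mark of tensors actually allocated;
# max_memory_reserved = high-water mark the caching allocator held from
# the driver, which is why reserved >= allocated in the log line above.
import torch

if torch.cuda.is_available():
    alloc_mib = torch.cuda.max_memory_allocated() // (1024 * 1024)
    reserved_mib = torch.cuda.max_memory_reserved() // (1024 * 1024)
    print(f"Peak memory allocated: {alloc_mib} MiB reserved: {reserved_mib} MiB")
```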